| file_id | content | local_path | kaggle_dataset_name | kaggle_dataset_owner | kversion | kversion_datasetsources | dataset_versions | datasets | users | script |
|---|---|---|---|---|---|---|---|---|---|---|
69046156 | [{"cell_type": "markdown", "source": "# Tomato Leaf Disease Detection 0.998 [inference]", "metadata": {}}, {"cell_type": "markdown", "source": "### Hi kagglers, This is `inference` notebook using `Keras`.\n\n> \n> [Tomato Leaf Disease Detection 0.998 [Training]](https://www.kaggle.com/ammarnassanalhajali/tomato-leaf-disease-detection-0-998-training)\n\n\n\n### Please if this kernel is useful, <font color='red'>please upvote !!</font>", "metadata": {}}, {"cell_type": "code", "source": "import os, cv2, json\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nfrom PIL import Image\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf\nfrom tensorflow.keras import models, layers\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.applications import InceptionV3\nfrom tensorflow.keras.optimizers import Adam\n\nfrom PIL import Image\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Input, BatchNormalization, GlobalAveragePooling2D", "metadata": {"papermill": {"duration": 5.665233, "end_time": "2021-05-15T09:39:21.887766", "exception": false, "start_time": "2021-05-15T09:39:16.222533", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:20.057816Z", "iopub.execute_input": "2021-06-03T16:46:20.058226Z", "iopub.status.idle": "2021-06-03T16:46:25.662132Z", "shell.execute_reply.started": "2021-06-03T16:46:20.058145Z", "shell.execute_reply": "2021-06-03T16:46:25.661227Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train= pd.read_csv(\"../input/tomato-diseases-dataset-csvimages/train.csv\")", "metadata": {"papermill": {"duration": 0.151608, "end_time": "2021-05-15T09:39:22.047606", "exception": false, "start_time": "2021-05-15T09:39:21.895998", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:25.664688Z", "iopub.execute_input": "2021-06-03T16:46:25.66506Z", "iopub.status.idle": "2021-06-03T16:46:25.791707Z", "shell.execute_reply.started": "2021-06-03T16:46:25.665025Z", "shell.execute_reply": "2021-06-03T16:46:25.790892Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "from sklearn.model_selection import train_test_split\ndf_train, df_validate, y_train, y_test = train_test_split(train, train.label, \n train_size=0.8, \n random_state=42,\n stratify=train.label)", "metadata": {"papermill": {"duration": 0.036995, "end_time": "2021-05-15T09:39:22.092571", "exception": false, "start_time": "2021-05-15T09:39:22.055576", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:25.793582Z", "iopub.execute_input": "2021-06-03T16:46:25.793946Z", "iopub.status.idle": "2021-06-03T16:46:25.82036Z", "shell.execute_reply.started": "2021-06-03T16:46:25.793886Z", "shell.execute_reply": "2021-06-03T16:46:25.819508Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df_train = df_train.reset_index(drop=True)\ndf_validate = df_validate.reset_index(drop=True)", "metadata": {"papermill": {"duration": 0.015947, "end_time": "2021-05-15T09:39:22.116386", "exception": false, "start_time": 
"2021-05-15T09:39:22.100439", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:25.82199Z", "iopub.execute_input": "2021-06-03T16:46:25.822331Z", "iopub.status.idle": "2021-06-03T16:46:25.828197Z", "shell.execute_reply.started": "2021-06-03T16:46:25.822295Z", "shell.execute_reply": "2021-06-03T16:46:25.827162Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "sample = df_train[df_train.label == 3].sample(3)\nplt.figure(figsize=(15, 5))\nfor ind, (img, label) in enumerate(zip(sample.img, sample.label)):\n plt.subplot(1, 3, ind + 1)\n img = cv2.imread(os.path.join(\"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images\", img))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n plt.imshow(img)\n plt.axis(\"off\")\n \nplt.show()", "metadata": {"papermill": {"duration": 0.365515, "end_time": "2021-05-15T09:39:22.489859", "exception": false, "start_time": "2021-05-15T09:39:22.124344", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:25.829814Z", "iopub.execute_input": "2021-06-03T16:46:25.830245Z", "iopub.status.idle": "2021-06-03T16:46:26.33528Z", "shell.execute_reply.started": "2021-06-03T16:46:25.830209Z", "shell.execute_reply": "2021-06-03T16:46:26.334467Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Main parameters\nBATCH_SIZE = 16\nSTEPS_PER_EPOCH = len(train)*0.8 / BATCH_SIZE\nVALIDATION_STEPS = len(train)*0.2 / BATCH_SIZE\nEPOCHS =60 #\nIMG_WIDTH= 256\nIMG_HEIGHT= 256\ntrain_dir = \"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images\"", "metadata": {"papermill": {"duration": 0.024928, "end_time": "2021-05-15T09:39:22.532076", "exception": false, "start_time": "2021-05-15T09:39:22.507148", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:26.336318Z", "iopub.execute_input": "2021-06-03T16:46:26.336612Z", "iopub.status.idle": "2021-06-03T16:46:26.342251Z", "shell.execute_reply.started": "2021-06-03T16:46:26.336581Z", "shell.execute_reply": "2021-06-03T16:46:26.341151Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df_train.label = df_train.label.astype('str')\ndf_validate.label = df_validate.label.astype('str')", "metadata": {"papermill": {"duration": 0.043244, "end_time": "2021-05-15T09:39:22.592353", "exception": false, "start_time": "2021-05-15T09:39:22.549109", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:26.343576Z", "iopub.execute_input": "2021-06-03T16:46:26.343945Z", "iopub.status.idle": "2021-06-03T16:46:26.37629Z", "shell.execute_reply.started": "2021-06-03T16:46:26.343886Z", "shell.execute_reply": "2021-06-03T16:46:26.375482Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train_datagen = ImageDataGenerator(rescale=1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n rotation_range = 180,\n vertical_flip = True,\n horizontal_flip = True)\n# our train_datagen generator will use the following transformations on the images\nvalidation_datagen = ImageDataGenerator(rescale=1./255)\n\n\n\n\n\ntrain_generator = train_datagen.flow_from_dataframe(df_train, \n train_dir,\n target_size=(IMG_WIDTH, IMG_HEIGHT),\n batch_size=BATCH_SIZE,\n x_col='img',\n y_col='label',\n class_mode = 'categorical')\n\n# generator = ImageDataGenerator(*args).flow_from_dataframe(dataframe, directory, target_size,\n# batch_size, 
x_col, y_col, class_mode)\n# your dataframe shoudl be in the format such that x_col = features, y_col = class/label\n# binary class mode since output is either 0(dog) or 1(cat)\n\nvalidation_generator = validation_datagen.flow_from_dataframe(df_validate, \n train_dir,\n target_size=(IMG_WIDTH, IMG_HEIGHT),\n x_col='img',\n y_col='label',\n class_mode='categorical', \n batch_size=BATCH_SIZE)", "metadata": {"papermill": {"duration": 11.345805, "end_time": "2021-05-15T09:39:33.95501", "exception": false, "start_time": "2021-05-15T09:39:22.609205", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:26.37756Z", "iopub.execute_input": "2021-06-03T16:46:26.377897Z", "iopub.status.idle": "2021-06-03T16:46:46.783012Z", "shell.execute_reply.started": "2021-06-03T16:46:26.377859Z", "shell.execute_reply": "2021-06-03T16:46:46.782169Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "def create_model():\n efficientnet_layers = InceptionV3(weights='imagenet', \n include_top=False, \n input_shape = (IMG_WIDTH, IMG_HEIGHT, 3),\n pooling='avg')\n\n model = Sequential()\n model.add(efficientnet_layers)\n model.add(Dense(10, activation=\"softmax\"))\n model.compile(optimizer = Adam(lr = 0.001),\n loss = \"categorical_crossentropy\",\n metrics = [\"acc\"])\n\n return model\n", "metadata": {"papermill": {"duration": 0.025658, "end_time": "2021-05-15T09:39:33.999009", "exception": false, "start_time": "2021-05-15T09:39:33.973351", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:46.786678Z", "iopub.execute_input": "2021-06-03T16:46:46.78696Z", "iopub.status.idle": "2021-06-03T16:46:46.79308Z", "shell.execute_reply.started": "2021-06-03T16:46:46.786926Z", "shell.execute_reply": "2021-06-03T16:46:46.792213Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "model = create_model()\nmodel.summary()", "metadata": {"papermill": {"duration": 5.106862, "end_time": "2021-05-15T09:39:39.123493", "exception": false, "start_time": "2021-05-15T09:39:34.016631", "status": "completed"}, "tags": [], "execution": {"iopub.status.busy": "2021-06-03T16:46:46.794578Z", "iopub.execute_input": "2021-06-03T16:46:46.794874Z", "iopub.status.idle": "2021-06-03T16:46:55.201073Z", "shell.execute_reply.started": "2021-06-03T16:46:46.794844Z", "shell.execute_reply": "2021-06-03T16:46:55.200242Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "model.load_weights('../input/tomatoleafdiseasedetection-weights/InceptionV3_256.h5')", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:46:55.203779Z", "iopub.execute_input": "2021-06-03T16:46:55.204091Z", "iopub.status.idle": "2021-06-03T16:46:57.925784Z", "shell.execute_reply.started": "2021-06-03T16:46:55.204061Z", "shell.execute_reply": "2021-06-03T16:46:57.92487Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "#ss=df_validate.sample(n=20)\nss=df_validate\nss=ss[['img', 'label']]\n\npreds = []\n\nfor image_id in ss.img:\n image = Image.open(os.path.join(\"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images/\", image_id))\n array = tf.keras.preprocessing.image.img_to_array(image)\n array=array/255\n image = np.expand_dims(array, axis = 0)\n preds.append(np.argmax(model.predict(image)))\n\nss['labelP'] = preds\nss\n", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:46:57.92718Z", 
"iopub.execute_input": "2021-06-03T16:46:57.927526Z", "iopub.status.idle": "2021-06-03T16:50:10.833681Z", "shell.execute_reply.started": "2021-06-03T16:46:57.92749Z", "shell.execute_reply": "2021-06-03T16:50:10.832868Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "score = model.evaluate_generator(validation_generator)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:50:10.835048Z", "iopub.execute_input": "2021-06-03T16:50:10.835388Z", "iopub.status.idle": "2021-06-03T16:50:26.8998Z", "shell.execute_reply.started": "2021-06-03T16:50:10.83535Z", "shell.execute_reply": "2021-06-03T16:50:26.898961Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "confusion_matrix = pd.crosstab(ss.label, ss.labelP, rownames=['Actual'], colnames=['Predicted'])\nprint (confusion_matrix)", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:50:26.902829Z", "iopub.execute_input": "2021-06-03T16:50:26.903112Z", "iopub.status.idle": "2021-06-03T16:50:26.934306Z", "shell.execute_reply.started": "2021-06-03T16:50:26.903083Z", "shell.execute_reply": "2021-06-03T16:50:26.93343Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "\nplt.figure(figsize=(10,8))\n\n#use seaborn to draw the headmap\nsns.heatmap(confusion_matrix, \n xticklabels=confusion_matrix.columns.values, #x label\n yticklabels=confusion_matrix.columns.values,cmap=\"YlGnBu\" ,annot=True, fmt=\"d\")\nplt.show()", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:50:26.936869Z", "iopub.execute_input": "2021-06-03T16:50:26.937153Z", "iopub.status.idle": "2021-06-03T16:50:27.522457Z", "shell.execute_reply.started": "2021-06-03T16:50:26.937125Z", "shell.execute_reply": "2021-06-03T16:50:27.52151Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "from imblearn.metrics import sensitivity_score, specificity_score\n\n\nfrom sklearn.metrics import f1_score, precision_score, recall_score,accuracy_score, confusion_matrix\ny_test=ss.label.values.astype(int)\ny_pred=ss.labelP.values.astype(int)\n\ntype(y_test)\n# Print f1, precision, and recall scores\nprint(\"specificity:\",specificity_score(y_test, y_pred , average=\"macro\"))\nprint(\"sensitivity:\",sensitivity_score(y_test, y_pred , average=\"macro\"))\nprint(\"recall:\",recall_score(y_test, y_pred , average=\"macro\"))\nprint(\"precision::\",precision_score(y_test, y_pred , average=\"macro\"))\nprint(\"f1_score:\",f1_score(y_test, y_pred , average=\"macro\"))\nprint(\"accuracy_score:\",accuracy_score(y_test, y_pred))", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:56:29.187956Z", "iopub.execute_input": "2021-06-03T16:56:29.188287Z", "iopub.status.idle": "2021-06-03T16:56:29.213283Z", "shell.execute_reply.started": "2021-06-03T16:56:29.188258Z", "shell.execute_reply": "2021-06-03T16:56:29.212478Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "from sklearn.metrics import classification_report\nimport numpy as np\n\n\nprint(classification_report(y_test, y_pred))", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:50:27.543402Z", "iopub.execute_input": "2021-06-03T16:50:27.543888Z", "iopub.status.idle": "2021-06-03T16:50:27.561971Z", "shell.execute_reply.started": "2021-06-03T16:50:27.543849Z", "shell.execute_reply": "2021-06-03T16:50:27.561011Z"}, "trusted": 
true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "y_true = y_test\ny_prediction = y_pred\ncnf_matrix = confusion_matrix(y_true, y_prediction)\nprint(cnf_matrix)\n#[[1 1 3]\n# [3 2 2]\n# [1 3 1]]\n\nFP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix) \nFN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\nTP = np.diag(cnf_matrix)\nTN = cnf_matrix.sum() - (FP + FN + TP)\n\nFP = FP.astype(float)\nFN = FN.astype(float)\nTP = TP.astype(float)\nTN = TN.astype(float)\n\n# Sensitivity, hit rate, recall, or true positive rate\nTPR = TP/(TP+FN)\n# Specificity or true negative rate\nTNR = TN/(TN+FP) \n# Precision or positive predictive value\nPPV = TP/(TP+FP)\n# Negative predictive value\nNPV = TN/(TN+FN)\n# Fall out or false positive rate\nFPR = FP/(FP+TN)\n# False negative rate\nFNR = FN/(TP+FN)\n# False discovery rate\nFDR = FP/(TP+FP)\n# Overall accuracy\nACC = (TP+TN)/(TP+FP+FN+TN)\n\nprint(\"Sensitivity OR recall\")\nprint(TPR)\nprint(\"-------------------\")\nprint(\"Specificity\")\nprint(TNR)\nprint(\"-------------------\")\nprint(\"Precision\")\nprint(PPV)\nprint(\"-------------------\")\nprint(\"accuracy\")\nprint(ACC)\n", "metadata": {"execution": {"iopub.status.busy": "2021-06-03T16:50:27.563465Z", "iopub.execute_input": "2021-06-03T16:50:27.563812Z", "iopub.status.idle": "2021-06-03T16:50:27.58652Z", "shell.execute_reply.started": "2021-06-03T16:50:27.563775Z", "shell.execute_reply": "2021-06-03T16:50:27.585446Z"}, "trusted": true}, "execution_count": null, "outputs": []}] | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046156.ipynb | tomato-diseases-dataset-csvimages | ammarnassanalhajali | [{"Id": 69046156, "ScriptId": 17307062, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5966695, "CreationDate": "07/26/2021 08:36:05", "VersionNumber": 5.0, "Title": "Tomato Leaf Disease Detection 0.998 [inference]", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 227.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 216.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}] | [{"Id": 91775532, "KernelVersionId": 69046156, "SourceDatasetVersionId": 2222983}] | [{"Id": 2222983, "DatasetId": 1335181, "DatasourceVersionId": 2264687, "CreatorUserId": 5966695, "LicenseName": "Unknown", "CreationDate": "05/12/2021 00:09:31", "VersionNumber": 1.0, "Title": "Tomato Diseases Dataset (CSV+Images)", "Slug": "tomato-diseases-dataset-csvimages", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}] | [{"Id": 1335181, "CreatorUserId": 5966695, "OwnerUserId": 5966695.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2222983.0, "CurrentDatasourceVersionId": 2264687.0, "ForumId": 1354120, "Type": 2, "CreationDate": "05/12/2021 00:09:31", "LastActivityDate": "05/12/2021", "TotalViews": 5441, "TotalDownloads": 516, "TotalVotes": 14, "TotalKernels": 2}] | [{"Id": 5966695, "UserName": "ammarnassanalhajali", "DisplayName": "Ammar Alhaj Ali", "RegisterDate": "10/15/2020", "PerformanceTier": 4}] | # # Tomato Leaf Disease Detection 0.998 [inference]
# ### Hi kagglers, This is `inference` notebook using `Keras`.
# >
# > [Tomato Leaf Disease Detection 0.998 [Training]](https://www.kaggle.com/ammarnassanalhajali/tomato-leaf-disease-detection-0-998-training)
# ### If you find this kernel useful, please upvote!
import os, cv2, json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Dropout,
Activation,
Input,
BatchNormalization,
GlobalAveragePooling2D,
)
train = pd.read_csv("../input/tomato-diseases-dataset-csvimages/train.csv")
df_train, df_validate, y_train, y_test = train_test_split(
train, train.label, train_size=0.8, random_state=42, stratify=train.label
)
df_train = df_train.reset_index(drop=True)
df_validate = df_validate.reset_index(drop=True)
sample = df_train[df_train.label == 3].sample(3)
plt.figure(figsize=(15, 5))
for ind, (img, label) in enumerate(zip(sample.img, sample.label)):
plt.subplot(1, 3, ind + 1)
img = cv2.imread(
os.path.join(
"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images",
img,
)
)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.axis("off")
plt.show()
# Main parameters
BATCH_SIZE = 16
STEPS_PER_EPOCH = int(np.ceil(len(train) * 0.8 / BATCH_SIZE))  # Keras expects integer step counts
VALIDATION_STEPS = int(np.ceil(len(train) * 0.2 / BATCH_SIZE))
EPOCHS = 60
IMG_WIDTH = 256
IMG_HEIGHT = 256
train_dir = "../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images"
df_train.label = df_train.label.astype("str")
df_validate.label = df_validate.label.astype("str")
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
rotation_range=180,
vertical_flip=True,
horizontal_flip=True,
)
# train_datagen applies the augmentation transformations defined above; the validation generator only rescales
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(
df_train,
train_dir,
target_size=(IMG_WIDTH, IMG_HEIGHT),
batch_size=BATCH_SIZE,
x_col="img",
y_col="label",
class_mode="categorical",
)
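# (Sketch, not in the original notebook) Quick sanity check of the augmentation
# pipeline: pull one batch from train_generator and display the first images.
# Assumes the generator yields (images, one-hot labels) as configured above.
xb, yb = next(train_generator)
plt.figure(figsize=(12, 3))
for k in range(4):
    plt.subplot(1, 4, k + 1)
    plt.imshow(xb[k])
    plt.title(f"class {np.argmax(yb[k])}")
    plt.axis("off")
plt.show()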
# generator = ImageDataGenerator(*args).flow_from_dataframe(dataframe, directory, target_size,
# batch_size, x_col, y_col, class_mode)
# your dataframe should be formatted so that x_col holds the image file names and y_col the class label
# class_mode is 'categorical' here because the ten disease classes are one-hot encoded
validation_generator = validation_datagen.flow_from_dataframe(
df_validate,
train_dir,
target_size=(IMG_WIDTH, IMG_HEIGHT),
x_col="img",
y_col="label",
class_mode="categorical",
batch_size=BATCH_SIZE,
)
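# (Sketch) flow_from_dataframe assigns class indices by sorting the label
# strings alphanumerically, so the argmax of model.predict only matches the
# original label when that sorted order is the identity mapping. For the
# single-digit string labels "0".."9" used here it is, but it is worth checking:
idx_to_label = {v: k for k, v in train_generator.class_indices.items()}
print(idx_to_label)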
def create_model():
    backbone = InceptionV3(
weights="imagenet",
include_top=False,
input_shape=(IMG_WIDTH, IMG_HEIGHT, 3),
pooling="avg",
)
model = Sequential()
    model.add(backbone)
model.add(Dense(10, activation="softmax"))
model.compile(
        optimizer=Adam(learning_rate=0.001), loss="categorical_crossentropy", metrics=["acc"]
)
return model
model = create_model()
model.summary()
model.load_weights("../input/tomatoleafdiseasedetection-weights/InceptionV3_256.h5")
# ss=df_validate.sample(n=20)
ss = df_validate
ss = ss[["img", "label"]]
preds = []
for image_id in ss.img:
image = Image.open(
os.path.join(
"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images/",
image_id,
)
)
array = tf.keras.preprocessing.image.img_to_array(image)
array = array / 255
image = np.expand_dims(array, axis=0)
preds.append(np.argmax(model.predict(image)))
ss["labelP"] = preds
ss
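# (Sketch) The per-image loop above is simple but slow. An equivalent batched
# approach predicts on a dedicated generator; shuffle must be disabled so the
# predictions line up with the df_validate rows (the validation_generator
# defined earlier keeps the default shuffle=True, so it is not suitable here):
# eval_gen = validation_datagen.flow_from_dataframe(
#     df_validate, train_dir, target_size=(IMG_WIDTH, IMG_HEIGHT),
#     x_col="img", y_col="label", class_mode="categorical",
#     batch_size=BATCH_SIZE, shuffle=False,
# )
# batched_preds = np.argmax(model.predict(eval_gen), axis=1)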
score = model.evaluate(validation_generator)  # evaluate_generator is deprecated; evaluate accepts generators
print("Test loss:", score[0])
print("Test accuracy:", score[1])
cm_df = pd.crosstab(ss.label, ss.labelP, rownames=["Actual"], colnames=["Predicted"])
print(cm_df)
plt.figure(figsize=(10, 8))
# use seaborn to draw the heatmap
sns.heatmap(
    cm_df,
    xticklabels=cm_df.columns.values,  # x labels
    yticklabels=cm_df.columns.values,
    cmap="YlGnBu",
    annot=True,
    fmt="d",
)
plt.show()
from imblearn.metrics import sensitivity_score, specificity_score
from sklearn.metrics import (
f1_score,
precision_score,
recall_score,
accuracy_score,
confusion_matrix,
)
y_test = ss.label.values.astype(int)
y_pred = ss.labelP.values.astype(int)
# Print macro-averaged specificity, sensitivity, recall, precision, and f1 scores
print("specificity:", specificity_score(y_test, y_pred, average="macro"))
print("sensitivity:", sensitivity_score(y_test, y_pred, average="macro"))
print("recall:", recall_score(y_test, y_pred, average="macro"))
print("precision:", precision_score(y_test, y_pred, average="macro"))
print("f1_score:", f1_score(y_test, y_pred, average="macro"))
print("accuracy_score:", accuracy_score(y_test, y_pred))
from sklearn.metrics import classification_report
import numpy as np
print(classification_report(y_test, y_pred))
y_true = y_test
y_prediction = y_pred
cnf_matrix = confusion_matrix(y_true, y_prediction)
print(cnf_matrix)
# [[1 1 3]
# [3 2 2]
# [1 3 1]]
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float)
FN = FN.astype(float)
TP = TP.astype(float)
TN = TN.astype(float)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP / (TP + FN)
# Specificity or true negative rate
TNR = TN / (TN + FP)
# Precision or positive predictive value
PPV = TP / (TP + FP)
# Negative predictive value
NPV = TN / (TN + FN)
# Fall out or false positive rate
FPR = FP / (FP + TN)
# False negative rate
FNR = FN / (TP + FN)
# False discovery rate
FDR = FP / (TP + FP)
# Overall accuracy
ACC = (TP + TN) / (TP + FP + FN + TN)
print("Sensitivity OR recall")
print(TPR)
print("-------------------")
print("Specificity")
print(TNR)
print("-------------------")
print("Precision")
print(PPV)
print("-------------------")
print("accuracy")
print(ACC)
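# (Sketch) Consistency check: the means of the per-class rates above should
# reproduce the macro-averaged sklearn/imblearn scores printed earlier.
print("mean TPR (macro recall):", TPR.mean())
print("mean TNR (macro specificity):", TNR.mean())
print("mean PPV (macro precision):", PPV.mean())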
|
69046074 | [{"cell_type": "code", "source": "import numpy as np\nimport pandas as pd\nimport os\nimport tensorflow as tf\nimport cv2\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport nibabel as nib", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_paths0 = []\nlabels0 = []\nfor dirname, _, filenames in os.walk('../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-0'):\n for filename in filenames:\n image_paths0.append(os.path.join(dirname, filename))\n labels0.append(0)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_paths1 = []\nlabels1 = []\nfor dirname, _, filenames in os.walk('../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-1'):\n for filename in filenames:\n image_paths1.append(os.path.join(dirname, filename))\n labels1.append(1)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_paths2 = []\nlabels2 = []\nfor dirname, _, filenames in os.walk('../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-2'):\n for filename in filenames:\n image_paths2.append(os.path.join(dirname, filename))\n labels2.append(2)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_paths3 = []\nlabels3 = []\nfor dirname, _, filenames in os.walk('../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-3'):\n for filename in filenames:\n image_paths3.append(os.path.join(dirname, filename))\n labels3.append(3)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_paths = []\nimage_paths.extend(image_paths0)\nimage_paths.extend(image_paths1)\nimage_paths.extend(image_paths2)\nimage_paths.extend(image_paths3)\nlabels = []\nlabels.extend(labels0)\nlabels.extend(labels1)\nlabels.extend(labels2)\nlabels.extend(labels3)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "np.max(labels)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "from sklearn.utils import shuffle\nimage_paths, labels = shuffle(image_paths, labels, random_state=10800)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "#def parse_function(image_paths, labels):\n#image_path = tf.compat.v1.data.make_one_shot_iterator(image_path)\n#print(image_path)\nimage_names_tab = []\nlabels_tab = []\ncounter = 0\nfor (image_path, label) in zip(image_paths[:20], labels[:20]):\n niimg= nib.load(image_path)\n npimage = niimg.get_fdata()\n s = npimage.shape\n for j in range(20,30) :\n img = np.zeros((s[0],s[1],3))\n img[:,:,0] = npimage[:,:,j]\n img[:,:,1] = npimage[:,:,j]\n img[:,:,2] = npimage[:,:,j]\n img = img / np.max(npimage[:,:,j])\n #img = tf.cast(img, tf.float32) \n img = cv2.resize(img, (224, 224))\n image_names_tab.append(img)\n labels_tab.append(label)\n counter += 1\n print(counter, end = '\\r')", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "np.shape(image_names_tab)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_names = image_names_tab\nlabels = labels_tab", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_names1 = []\nimage_names2 = []\nimage_names3 = []\nimage_names4 = 
[]\nimage_names5 = []\nimage_names6 = []\nimage_names7 = []\nimage_names8 = []\nimage_names9 = []\nimage_names10 = []\nlabels1 = []\nlabels2 = []\nlabels3 = []\nlabels4 = []\nlabels5 = []\nlabels6 = []\nlabels7 = []\nlabels8 = []\nlabels9 = []\nlabels10 = []\ncounter = 0\n\nfor i in range(0, len(image_names), 10):\n image_names1.append(image_names[i])\n image_names2.append(image_names[i+1])\n image_names3.append(image_names[i+2])\n image_names4.append(image_names[i+3])\n image_names5.append(image_names[i+4])\n image_names6.append(image_names[i+5])\n image_names7.append(image_names[i+6])\n image_names8.append(image_names[i+7])\n image_names9.append(image_names[i+8])\n image_names10.append(image_names[i+9])\n labels1.append(labels[i])\n labels2.append(labels[i+1])\n labels3.append(labels[i+2])\n labels4.append(labels[i+3])\n labels5.append(labels[i+4])\n labels6.append(labels[i+5])\n labels7.append(labels[i+6])\n labels8.append(labels[i+7])\n labels9.append(labels[i+8])\n labels10.append(labels[i+9])\n counter +=1\n print(counter, end ='\\r')", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "image_names1 = np.array(image_names1)\nimage_names2 = np.array(image_names2)\nimage_names3 = np.array(image_names3)\nimage_names4 = np.array(image_names4)\nimage_names5 = np.array(image_names5)\nimage_names6 = np.array(image_names6)\nimage_names7 = np.array(image_names7)\nimage_names8 = np.array(image_names8)\nimage_names9 = np.array(image_names9)\nimage_names10 = np.array(image_names10)\n", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "labels1 = np.array(labels1)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "from sklearn.utils import shuffle\nimage_names1, labels1, image_names2, labels2, image_names3, labels3, image_names4, labels4, image_names5, labels5, image_names6, labels6, image_names7, labels7, image_names8, labels8, image_names9, labels9, image_names10, labels10 = shuffle(image_names1, labels1, image_names2, labels2, image_names3, labels3, image_names4, labels4, image_names5, labels5, image_names6, labels6, image_names7, labels7, image_names8, labels8, image_names9, labels9, image_names10, labels10, random_state = 10000) ", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "i = 100\nprint(labels1[i])\nprint(labels5[i])\nprint(labels7[i])", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "import tensorflow as tf\nbase_model = tf.keras.applications.ResNet50(include_top = False, weights='imagenet', input_tensor=None, input_shape = (224, 224, 3), classes = 1000)", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, Input, Model\ninputA = Input(shape=(224,224,3))\ninputB = Input(shape=(224,224,3))\ninputC = Input(shape=(224,224,3))\ninputD = Input(shape=(224,224,3))\ninputE = Input(shape=(224,224,3))\ninputF = Input(shape=(224,224,3))\ninputG = Input(shape=(224,224,3))\ninputH = Input(shape=(224,224,3))\ninputI = Input(shape=(224,224,3))\ninputJ = Input(shape=(224,224,3))\n#defining parallel outputs\nA = Model(inputs=inputA, outputs=base_model(inputA))\nB = Model(inputs=inputB, outputs=base_model(inputB))\nC = Model(inputs=inputC, 
outputs=base_model(inputC))\nD = Model(inputs=inputD, outputs=base_model(inputD))\nE = Model(inputs=inputE, outputs=base_model(inputE))\nF = Model(inputs=inputF, outputs=base_model(inputF))\nG = Model(inputs=inputG, outputs=base_model(inputG))\nH = Model(inputs=inputH, outputs=base_model(inputH))\nI = Model(inputs=inputI, outputs=base_model(inputI))\nJ = Model(inputs=inputJ, outputs=base_model(inputJ))\ncombined = layers.Add()([A.output, B.output, C.output, D.output, E.output, F.output, G.output, H.output, I.output, J.output])\n#x = layers.Conv2D(512, 3, activation = 'relu', padding = 'same')(combined)\n#fx = layers.Conv2D(512, 3, activation='relu', padding='same')(x)\n#fx = layers.BatchNormalization()(fx)\n#fx = layers.Conv2D(512, 3, padding='same')(fx)\n#out = layers.Add()([x,fx])\n#out = layers.MaxPooling2D()(out)\n#out = layers.ReLU()(out)\n#out = layers.BatchNormalization()(out)\nz = layers.Flatten()(combined)\n#z = layers.Dense(4096, activation=\"relu\")(z)\n#z = layers.Dropout(0.5)(z)\n#z = layers.Dense(4096, activation='relu')(z)\n#z = layers.Dropout(0.4)(z)\nz = layers.Dense(4, activation=\"softmax\")(z)\nmodel = Model(inputs=[A.input, B.input, C.input, D.input, E.input, F.input, G.input, H.input, I.input, J.input], outputs=z)\nmodel.summary()", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "for layer in model.layers:\n layer.trainable = True", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "model.compile(loss = 'sparse_categorical_crossentropy', optimizer = tf.keras.optimizers.Adam(), metrics = ['acc'])", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "from keras.callbacks import ModelCheckpoint\ncheckpoint = ModelCheckpoint(\"nohnohmosmed.h5\", monitor='val_acc', verbose=1, save_best_only=True, mode='auto')", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "History = model.fit(x=[image_names1, image_names2, image_names3, image_names4, image_names5, image_names6, image_names7, image_names8, image_names9, image_names10], y = labels1, validation_split = 0.2, epochs = 50, callbacks = [checkpoint])", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "model.summary()", "metadata": {"trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "model = model.save_weights('model_mri.h5')", "metadata": {}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "loss = model.history['loss']\nval_loss = model.history['val_loss']\nepochs = range(300)\nplt.figure()\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\nplt.show()", "metadata": {}, "execution_count": null, "outputs": []}] | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046074.ipynb | mosmeddata-fullchestct | ahmedamineafardas | [{"Id": 69046074, "ScriptId": 18591601, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7118898, "CreationDate": "07/26/2021 08:34:50", "VersionNumber": 1.0, "Title": "Biotech MosMed Dataset model", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 219.0, "LinesInsertedFromPrevious": 33.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 186.0, "LinesInsertedFromFork": 33.0, "LinesDeletedFromFork": 27.0, 
"LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 186.0, "TotalVotes": 0}] | [{"Id": 91775379, "KernelVersionId": 69046074, "SourceDatasetVersionId": 2076367}] | [{"Id": 2076367, "DatasetId": 1244618, "DatasourceVersionId": 2116675, "CreatorUserId": 7051386, "LicenseName": "Unknown", "CreationDate": "04/01/2021 04:22:08", "VersionNumber": 1.0, "Title": "MosMedData FullChestCT", "Slug": "mosmeddata-fullchestct", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}] | [{"Id": 1244618, "CreatorUserId": 7051386, "OwnerUserId": 7051386.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2076367.0, "CurrentDatasourceVersionId": 2116675.0, "ForumId": 1262902, "Type": 2, "CreationDate": "04/01/2021 04:22:08", "LastActivityDate": "04/01/2021", "TotalViews": 1017, "TotalDownloads": 52, "TotalVotes": 1, "TotalKernels": 4}] | [{"Id": 7051386, "UserName": "ahmedamineafardas", "DisplayName": "ahmed amine afardas", "RegisterDate": "03/28/2021", "PerformanceTier": 0}] | import numpy as np
import pandas as pd
import os
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import nibabel as nib
image_paths0 = []
labels0 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-0"
):
for filename in filenames:
image_paths0.append(os.path.join(dirname, filename))
labels0.append(0)
image_paths1 = []
labels1 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-1"
):
for filename in filenames:
image_paths1.append(os.path.join(dirname, filename))
labels1.append(1)
image_paths2 = []
labels2 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-2"
):
for filename in filenames:
image_paths2.append(os.path.join(dirname, filename))
labels2.append(2)
image_paths3 = []
labels3 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-3"
):
for filename in filenames:
image_paths3.append(os.path.join(dirname, filename))
labels3.append(3)
image_paths = []
image_paths.extend(image_paths0)
image_paths.extend(image_paths1)
image_paths.extend(image_paths2)
image_paths.extend(image_paths3)
labels = []
labels.extend(labels0)
labels.extend(labels1)
labels.extend(labels2)
labels.extend(labels3)
np.max(labels)
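# (Sketch) Beyond checking the maximum label, the per-class counts show how
# imbalanced the four CT severity categories are:
print(np.bincount(labels))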
from sklearn.utils import shuffle
image_paths, labels = shuffle(image_paths, labels, random_state=10800)
# def parse_function(image_paths, labels):
# image_path = tf.compat.v1.data.make_one_shot_iterator(image_path)
# print(image_path)
image_names_tab = []
labels_tab = []
counter = 0
for image_path, label in zip(image_paths[:20], labels[:20]):
niimg = nib.load(image_path)
npimage = niimg.get_fdata()
s = npimage.shape
for j in range(20, 30):
img = np.zeros((s[0], s[1], 3))
img[:, :, 0] = npimage[:, :, j]
img[:, :, 1] = npimage[:, :, j]
img[:, :, 2] = npimage[:, :, j]
img = img / np.max(npimage[:, :, j])
# img = tf.cast(img, tf.float32)
img = cv2.resize(img, (224, 224))
image_names_tab.append(img)
labels_tab.append(label)
counter += 1
print(counter, end="\r")
np.shape(image_names_tab)
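# (Sketch, hedged) The loop above normalizes each slice by np.max(npimage[:, :, j]);
# a slice whose maximum is 0 would produce NaN/inf values. A guarded variant:
# denom = np.max(npimage[:, :, j])
# img = img / denom if denom > 0 else np.zeros_like(img)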
image_names = image_names_tab
labels = labels_tab
image_names1 = []
image_names2 = []
image_names3 = []
image_names4 = []
image_names5 = []
image_names6 = []
image_names7 = []
image_names8 = []
image_names9 = []
image_names10 = []
labels1 = []
labels2 = []
labels3 = []
labels4 = []
labels5 = []
labels6 = []
labels7 = []
labels8 = []
labels9 = []
labels10 = []
counter = 0
for i in range(0, len(image_names), 10):
image_names1.append(image_names[i])
image_names2.append(image_names[i + 1])
image_names3.append(image_names[i + 2])
image_names4.append(image_names[i + 3])
image_names5.append(image_names[i + 4])
image_names6.append(image_names[i + 5])
image_names7.append(image_names[i + 6])
image_names8.append(image_names[i + 7])
image_names9.append(image_names[i + 8])
image_names10.append(image_names[i + 9])
labels1.append(labels[i])
labels2.append(labels[i + 1])
labels3.append(labels[i + 2])
labels4.append(labels[i + 3])
labels5.append(labels[i + 4])
labels6.append(labels[i + 5])
labels7.append(labels[i + 6])
labels8.append(labels[i + 7])
labels9.append(labels[i + 8])
labels10.append(labels[i + 9])
counter += 1
print(counter, end="\r")
image_names1 = np.array(image_names1)
image_names2 = np.array(image_names2)
image_names3 = np.array(image_names3)
image_names4 = np.array(image_names4)
image_names5 = np.array(image_names5)
image_names6 = np.array(image_names6)
image_names7 = np.array(image_names7)
image_names8 = np.array(image_names8)
image_names9 = np.array(image_names9)
image_names10 = np.array(image_names10)
labels1 = np.array(labels1)
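# (Sketch) Because slices were appended in fixed groups of 10, the ten parallel
# lists above can also be produced in one step with a reshape:
# stacked = np.asarray(image_names).reshape(-1, 10, 224, 224, 3)
# slice_inputs = [stacked[:, k] for k in range(10)]   # 10 arrays, one per slice position
# slice_labels = np.asarray(labels).reshape(-1, 10)[:, 0]  # one label per scan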
from sklearn.utils import shuffle
(
image_names1,
labels1,
image_names2,
labels2,
image_names3,
labels3,
image_names4,
labels4,
image_names5,
labels5,
image_names6,
labels6,
image_names7,
labels7,
image_names8,
labels8,
image_names9,
labels9,
image_names10,
labels10,
) = shuffle(
image_names1,
labels1,
image_names2,
labels2,
image_names3,
labels3,
image_names4,
labels4,
image_names5,
labels5,
image_names6,
labels6,
image_names7,
labels7,
image_names8,
labels8,
image_names9,
labels9,
image_names10,
labels10,
random_state=10000,
)
i = 10  # must stay below the number of scan groups (20 scans were loaded above)
print(labels1[i])
print(labels5[i])
print(labels7[i])
import tensorflow as tf
base_model = tf.keras.applications.ResNet50(
include_top=False,
weights="imagenet",
input_tensor=None,
input_shape=(224, 224, 3),
classes=1000,
)
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input, Model
inputA = Input(shape=(224, 224, 3))
inputB = Input(shape=(224, 224, 3))
inputC = Input(shape=(224, 224, 3))
inputD = Input(shape=(224, 224, 3))
inputE = Input(shape=(224, 224, 3))
inputF = Input(shape=(224, 224, 3))
inputG = Input(shape=(224, 224, 3))
inputH = Input(shape=(224, 224, 3))
inputI = Input(shape=(224, 224, 3))
inputJ = Input(shape=(224, 224, 3))
# defining parallel outputs
A = Model(inputs=inputA, outputs=base_model(inputA))
B = Model(inputs=inputB, outputs=base_model(inputB))
C = Model(inputs=inputC, outputs=base_model(inputC))
D = Model(inputs=inputD, outputs=base_model(inputD))
E = Model(inputs=inputE, outputs=base_model(inputE))
F = Model(inputs=inputF, outputs=base_model(inputF))
G = Model(inputs=inputG, outputs=base_model(inputG))
H = Model(inputs=inputH, outputs=base_model(inputH))
I = Model(inputs=inputI, outputs=base_model(inputI))
J = Model(inputs=inputJ, outputs=base_model(inputJ))
combined = layers.Add()(
[
A.output,
B.output,
C.output,
D.output,
E.output,
F.output,
G.output,
H.output,
I.output,
J.output,
]
)
# x = layers.Conv2D(512, 3, activation = 'relu', padding = 'same')(combined)
# fx = layers.Conv2D(512, 3, activation='relu', padding='same')(x)
# fx = layers.BatchNormalization()(fx)
# fx = layers.Conv2D(512, 3, padding='same')(fx)
# out = layers.Add()([x,fx])
# out = layers.MaxPooling2D()(out)
# out = layers.ReLU()(out)
# out = layers.BatchNormalization()(out)
z = layers.Flatten()(combined)
# z = layers.Dense(4096, activation="relu")(z)
# z = layers.Dropout(0.5)(z)
# z = layers.Dense(4096, activation='relu')(z)
# z = layers.Dropout(0.4)(z)
z = layers.Dense(4, activation="softmax")(z)
model = Model(
inputs=[
A.input,
B.input,
C.input,
D.input,
E.input,
F.input,
G.input,
H.input,
I.input,
J.input,
],
outputs=z,
)
model.summary()
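# (Sketch) The ten single-input wrapper Models A..J are not strictly needed:
# applying the shared base_model to each Input directly builds the same
# weight-sharing graph with less boilerplate:
# inputs = [Input(shape=(224, 224, 3)) for _ in range(10)]
# combined_alt = layers.Add()([base_model(x) for x in inputs])
# z_alt = layers.Dense(4, activation="softmax")(layers.Flatten()(combined_alt))
# model_alt = Model(inputs=inputs, outputs=z_alt)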
for layer in model.layers:
layer.trainable = True
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["acc"],
)
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint(
"nohnohmosmed.h5", monitor="val_acc", verbose=1, save_best_only=True, mode="auto"
)
History = model.fit(
x=[
image_names1,
image_names2,
image_names3,
image_names4,
image_names5,
image_names6,
image_names7,
image_names8,
image_names9,
image_names10,
],
y=labels1,
validation_split=0.2,
epochs=50,
callbacks=[checkpoint],
)
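# Note: validation_split holds out the *last* 20% of the arrays without
# re-shuffling them; the sklearn shuffle applied earlier makes that holdout
# reasonably representative here.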
model.summary()
model.save_weights("model_mri.h5")  # save_weights returns None, so don't rebind `model`
loss = History.history["loss"]
val_loss = History.history["val_loss"]
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
|
69046611 | [{"cell_type": "code", "source": "import numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.multioutput import MultiOutputRegressor\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:55.905109Z", "iopub.execute_input": "2021-07-26T06:22:55.905515Z", "iopub.status.idle": "2021-07-26T06:22:56.997939Z", "shell.execute_reply.started": "2021-07-26T06:22:55.905403Z", "shell.execute_reply": "2021-07-26T06:22:56.996999Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train = pd.read_csv('/kaggle/input/tabular-playground-series-jul-2021/train.csv')\ntest = pd.read_csv('/kaggle/input/tabular-playground-series-jul-2021/test.csv')\nsub = pd.read_csv('/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:56.999435Z", "iopub.execute_input": "2021-07-26T06:22:56.999773Z", "iopub.status.idle": "2021-07-26T06:22:57.058685Z", "shell.execute_reply.started": "2021-07-26T06:22:56.999737Z", "shell.execute_reply": "2021-07-26T06:22:57.057722Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train = train.set_index(\"date_time\").copy()\ntest = test.set_index(\"date_time\").copy()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:57.0622Z", "iopub.execute_input": "2021-07-26T06:22:57.062464Z", "iopub.status.idle": "2021-07-26T06:22:57.080274Z", "shell.execute_reply.started": "2021-07-26T06:22:57.062428Z", "shell.execute_reply": "2021-07-26T06:22:57.079515Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "target_cols = [col for col in train.columns if col.startswith('target')]\nfeat_cols = [col for col in train.columns if col not in target_cols]", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:57.083298Z", "iopub.execute_input": "2021-07-26T06:22:57.08367Z", "iopub.status.idle": "2021-07-26T06:22:57.090197Z", "shell.execute_reply.started": "2021-07-26T06:22:57.083637Z", "shell.execute_reply": "2021-07-26T06:22:57.089348Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train, val = train_test_split(train, test_size=0.2, random_state=42)\nfea_scaler = MinMaxScaler()\nlab_scaler = MinMaxScaler()\n\nXtrain_scaled = fea_scaler.fit_transform(train.drop(target_cols[:],axis=1))\nXval_scaled = fea_scaler.transform(val.drop(target_cols[:],axis=1))\nYtrain_scaled =lab_scaler.fit_transform(train[target_cols[:]])\nYval_scaled =lab_scaler.transform(val[target_cols[:]])\nXtest_scaled = fea_scaler.transform(test)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:57.093728Z", "iopub.execute_input": "2021-07-26T06:22:57.094039Z", "iopub.status.idle": "2021-07-26T06:22:57.122282Z", "shell.execute_reply.started": "2021-07-26T06:22:57.094004Z", "shell.execute_reply": "2021-07-26T06:22:57.12153Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 
0.1}\nmodel = xgb.XGBRegressor(**other_params)\nmultioutputregressor = MultiOutputRegressor(xgb.XGBRegressor(objective='reg:squarederror',**other_params)).fit(Xtrain_scaled, Ytrain_scaled)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:35:58.78341Z", "iopub.execute_input": "2021-07-26T08:35:58.783744Z", "iopub.status.idle": "2021-07-26T08:36:03.669588Z", "shell.execute_reply.started": "2021-07-26T08:35:58.783713Z", "shell.execute_reply": "2021-07-26T08:36:03.668683Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'n_estimators': [400, 500, 600, 700, 800]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_ \n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:30:28.413953Z", "iopub.execute_input": "2021-07-26T06:30:28.41428Z", "iopub.status.idle": "2021-07-26T06:36:38.837216Z", "shell.execute_reply.started": "2021-07-26T06:30:28.414251Z", "shell.execute_reply": "2021-07-26T06:36:38.836204Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'max_depth': [3, 4, 5, 6, 7, 8, 9, 10]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytr ee': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_ \n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:50:48.265828Z", "iopub.execute_input": "2021-07-26T07:50:48.266183Z", "iopub.status.idle": "2021-07-26T08:00:43.221204Z", "shell.execute_reply.started": "2021-07-26T07:50:48.26615Z", "shell.execute_reply": "2021-07-26T08:00:43.220325Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'min_child_weight': [1, 2, 3, 4, 5, 6]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytr ee': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_ \n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# 
print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:06:26.434258Z", "iopub.execute_input": "2021-07-26T08:06:26.434607Z", "iopub.status.idle": "2021-07-26T08:12:39.517724Z", "shell.execute_reply.started": "2021-07-26T08:06:26.434573Z", "shell.execute_reply": "2021-07-26T08:12:39.516614Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_\n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:34:35.386076Z", "iopub.execute_input": "2021-07-26T07:34:35.386394Z", "iopub.status.idle": "2021-07-26T07:40:45.010251Z", "shell.execute_reply.started": "2021-07-26T07:34:35.386365Z", "shell.execute_reply": "2021-07-26T07:40:45.009245Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'subsample': [0.6, 0.7, 0.8, 0.9], 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# #evalute_result = optimized_GBM.grid_scores_\n# #print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:13:25.544492Z", "iopub.execute_input": "2021-07-26T08:13:25.544821Z", "iopub.status.idle": "2021-07-26T08:28:44.136295Z", "shell.execute_reply.started": "2021-07-26T08:13:25.54479Z", "shell.execute_reply": "2021-07-26T08:28:44.133547Z"}, "collapsed": true, "jupyter": {"outputs_hidden": true}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# #evalute_result = optimized_GBM.grid_scores_\n# #print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# 
print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'n_estimators': [400, 500, 600, 700, 800], \n# 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10], \n# 'min_child_weight': [1, 2, 3, 4, 5, 6], \n# 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6], \n# 'subsample': [0.6, 0.7, 0.8, 0.9], \n# 'colsample_bytree': [0.6, 0.7, 0.8, 0.9], \n# 'reg_alpha': [0.05, 0.1, 1, 2, 3], \n# 'reg_lambda': [0.05, 0.1, 1, 2, 3], \n# 'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 0.1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=10)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_\n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:31:59.64377Z", "iopub.execute_input": "2021-07-26T08:31:59.644124Z", "iopub.status.idle": "2021-07-26T08:33:50.113972Z", "shell.execute_reply.started": "2021-07-26T08:31:59.644093Z", "shell.execute_reply": "2021-07-26T08:33:50.110025Z"}, "collapsed": true, "jupyter": {"outputs_hidden": true}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "pred = multioutputregressor.predict(Xtest_scaled)\npred = lab_scaler.inverse_transform(pred)\npred = pred.reshape(2247, 3)\nsub[target_cols[:]] = pred\nsub.to_csv('sample_submission.csv', index=0)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:39:22.229237Z", "iopub.execute_input": "2021-07-26T08:39:22.229569Z", "iopub.status.idle": "2021-07-26T08:39:22.274571Z", "shell.execute_reply.started": "2021-07-26T08:39:22.229537Z", "shell.execute_reply": "2021-07-26T08:39:22.273824Z"}, "trusted": true}, "execution_count": null, "outputs": []}] | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046611.ipynb | null | null | [{"Id": 69046611, "ScriptId": 18816238, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7392108, "CreationDate": "07/26/2021 08:43:12", "VersionNumber": 3.0, "Title": "XGBoost", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 119.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}] | null | null | null | null | import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputRegressor
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv")
test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv")
sub = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv"
)
train = train.set_index("date_time").copy()
test = test.set_index("date_time").copy()
target_cols = [col for col in train.columns if col.startswith("target")]
feat_cols = [col for col in train.columns if col not in target_cols]
train, val = train_test_split(train, test_size=0.2, random_state=42)
fea_scaler = MinMaxScaler()
lab_scaler = MinMaxScaler()
Xtrain_scaled = fea_scaler.fit_transform(train.drop(target_cols[:], axis=1))
Xval_scaled = fea_scaler.transform(val.drop(target_cols[:], axis=1))
Ytrain_scaled = lab_scaler.fit_transform(train[target_cols[:]])
Yval_scaled = lab_scaler.transform(val[target_cols[:]])
Xtest_scaled = fea_scaler.transform(test)
other_params = {
"learning_rate": 0.1,
"n_estimators": 400,
"max_depth": 4,
"min_child_weight": 5,
"seed": 0,
"subsample": 0.8,
"colsample_bytree": 0.8,
"gamma": 0.1,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
}
model = xgb.XGBRegressor(**other_params)  # base estimator reused by the commented grid searches below
multioutputregressor = MultiOutputRegressor(
xgb.XGBRegressor(objective="reg:squarederror", **other_params)
).fit(Xtrain_scaled, Ytrain_scaled)
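# The validation split created above is never scored in the original flow; as a
# hedged sanity check (a sketch, not part of the submission pipeline), the
# held-out RMSE in the original target units can be computed like this:
from sklearn.metrics import mean_squared_error

val_pred = lab_scaler.inverse_transform(multioutputregressor.predict(Xval_scaled))
val_true = lab_scaler.inverse_transform(Yval_scaled)
print("Validation RMSE:", np.sqrt(mean_squared_error(val_true, val_pred)))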
# cv_params = {'n_estimators': [400, 500, 600, 700, 800]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evaluate_result = optimized_GBM.cv_results_
# print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'max_depth': [3, 4, 5, 6, 7, 8, 9, 10]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evaluate_result = optimized_GBM.cv_results_
# print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'min_child_weight': [1, 2, 3, 4, 5, 6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evaluate_result = optimized_GBM.cv_results_
# print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evaluate_result = optimized_GBM.cv_results_
# print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'subsample': [0.6, 0.7, 0.8, 0.9], 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# # evaluate_result = optimized_GBM.cv_results_  # grid_scores_ was removed from sklearn; cv_results_ replaces it
# # print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# # evaluate_result = optimized_GBM.cv_results_  # grid_scores_ was removed from sklearn; cv_results_ replaces it
# # print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'n_estimators': [400, 500, 600, 700, 800],
# 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10],
# 'min_child_weight': [1, 2, 3, 4, 5, 6],
# 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
# 'subsample': [0.6, 0.7, 0.8, 0.9],
# 'colsample_bytree': [0.6, 0.7, 0.8, 0.9],
# 'reg_alpha': [0.05, 0.1, 1, 2, 3],
# 'reg_lambda': [0.05, 0.1, 1, 2, 3],
# 'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 0.1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=10)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evaluate_result = optimized_GBM.cv_results_
# print('CV results for each round: {0}'.format(evaluate_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
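# All of the staged searches above were run once and then commented out. As a
# cheaper hedged alternative (a sketch, not used for the submission below),
# RandomizedSearchCV can sample the same joint space in a single pass:
# from sklearn.model_selection import RandomizedSearchCV
# param_dist = {'n_estimators': [400, 500, 600, 700, 800],
#               'max_depth': [3, 4, 5, 6, 7, 8, 9, 10],
#               'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}
# search = RandomizedSearchCV(model, param_dist, n_iter=10, scoring='r2',
#                             cv=5, random_state=0, n_jobs=4)
# search.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# print('Best parameter values: {0}'.format(search.best_params_))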
pred = multioutputregressor.predict(Xtest_scaled)
pred = lab_scaler.inverse_transform(pred)
pred = pred.reshape(2247, 3)  # 2247 test rows x 3 target columns
sub[target_cols] = pred
sub.to_csv("sample_submission.csv", index=False)
|
69046436 | [{"cell_type": "code", "source": "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np \nimport pandas as pd\nfrom sklearn.impute import SimpleImputer, KNNImputer\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport seaborn as sns\n\nfrom catboost import CatBoostRegressor\nfrom lightgbm import LGBMRegressor\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom xgboost import XGBRegressor\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session", "metadata": {"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "execution": {"iopub.status.busy": "2021-07-26T06:35:38.103387Z", "iopub.execute_input": "2021-07-26T06:35:38.103714Z", "iopub.status.idle": "2021-07-26T06:35:41.010228Z", "shell.execute_reply.started": "2021-07-26T06:35:38.103630Z", "shell.execute_reply": "2021-07-26T06:35:41.009213Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train = pd.read_csv(\"../input/house-prices-advanced-regression-techniques/train.csv\")\ntest = pd.read_csv(\"../input/house-prices-advanced-regression-techniques/test.csv\")\nprint(train.shape)\nprint(test.shape)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:56:11.667398Z", "iopub.execute_input": "2021-07-26T06:56:11.667726Z", "iopub.status.idle": "2021-07-26T06:56:11.747206Z", "shell.execute_reply.started": "2021-07-26T06:56:11.667697Z", "shell.execute_reply": "2021-07-26T06:56:11.746289Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df = train.append(test).reset_index(drop=True)\nprint(df.shape)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:56:15.005056Z", "iopub.execute_input": "2021-07-26T06:56:15.005421Z", "iopub.status.idle": "2021-07-26T06:56:15.029843Z", "shell.execute_reply.started": "2021-07-26T06:56:15.005390Z", "shell.execute_reply": "2021-07-26T06:56:15.029203Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df.columns", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:56:17.230483Z", "iopub.execute_input": "2021-07-26T06:56:17.230819Z", "iopub.status.idle": "2021-07-26T06:56:17.236956Z", "shell.execute_reply.started": "2021-07-26T06:56:17.230790Z", "shell.execute_reply": 
"2021-07-26T06:56:17.236241Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "def check_df(dataframe, head=5):\n print(\"##################### Shape #####################\")\n print(dataframe.shape)\n print(\"##################### Types #####################\")\n print(dataframe.dtypes)\n print(\"##################### Head #####################\")\n print(dataframe.head(head))\n print(\"##################### Tail #####################\")\n print(dataframe.tail(head))\n print(\"##################### NA #####################\")\n print(dataframe.isnull().sum())\n print(\"##################### Quantiles #####################\")\n print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)\n\ncheck_df(df)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:56:53.470511Z", "iopub.execute_input": "2021-07-26T06:56:53.470854Z", "iopub.status.idle": "2021-07-26T06:56:53.543650Z", "shell.execute_reply.started": "2021-07-26T06:56:53.470822Z", "shell.execute_reply": "2021-07-26T06:56:53.542655Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "def grab_col_names(dataframe, cat_th=10, car_th=20):\n # cat_cols, cat_but_car\n cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == \"O\"]\n num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and\n dataframe[col].dtypes != \"O\"]\n cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and\n dataframe[col].dtypes == \"O\"]\n cat_cols = cat_cols + num_but_cat\n cat_cols = [col for col in cat_cols if col not in cat_but_car]\n\n # num_cols\n num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != \"O\"]\n num_cols = [col for col in num_cols if col not in num_but_cat]\n\n print(f\"Observations: {dataframe.shape[0]}\")\n print(f\"Variables: {dataframe.shape[1]}\")\n print(f'cat_cols: {len(cat_cols)}')\n print(f'num_cols: {len(num_cols)}')\n print(f'cat_but_car: {len(cat_but_car)}')\n print(f'num_but_cat: {len(num_but_cat)}')\n return cat_cols, num_cols, cat_but_car\n\ncat_cols, num_cols, cat_but_car = grab_col_names(df)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:57:01.827695Z", "iopub.execute_input": "2021-07-26T06:57:01.828041Z", "iopub.status.idle": "2021-07-26T06:57:01.916977Z", "shell.execute_reply.started": "2021-07-26T06:57:01.828011Z", "shell.execute_reply": "2021-07-26T06:57:01.916323Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df[\"Neighborhood\"].value_counts()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:57:05.951786Z", "iopub.execute_input": "2021-07-26T06:57:05.952331Z", "iopub.status.idle": "2021-07-26T06:57:05.960409Z", "shell.execute_reply.started": "2021-07-26T06:57:05.952286Z", "shell.execute_reply": "2021-07-26T06:57:05.959550Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Kategorik De\u011fi\u015fken Analizi\ndef cat_summary(dataframe, col_name, plot=False):\n print(pd.DataFrame({col_name: dataframe[col_name].value_counts(),\n \"Ratio\": 100 * dataframe[col_name].value_counts() / len(dataframe)}))\n print(\"##########################################\")\n if plot:\n sns.countplot(x=dataframe[col_name], data=dataframe)\n plt.show()\n\nfor col in cat_cols:\n cat_summary(df, col)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:58:04.294283Z", "iopub.execute_input": 
"2021-07-26T06:58:04.294628Z", "iopub.status.idle": "2021-07-26T06:58:04.548846Z", "shell.execute_reply.started": "2021-07-26T06:58:04.294600Z", "shell.execute_reply": "2021-07-26T06:58:04.548019Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "for col in cat_but_car:\n cat_summary(df, col)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:58:12.422333Z", "iopub.execute_input": "2021-07-26T06:58:12.422660Z", "iopub.status.idle": "2021-07-26T06:58:12.435637Z", "shell.execute_reply.started": "2021-07-26T06:58:12.422632Z", "shell.execute_reply": "2021-07-26T06:58:12.434774Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Say\u0131sal De\u011fi\u015fken Analizi\ndf[num_cols].describe([0.10, 0.30, 0.50, 0.70, 0.80, 0.99]).T", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:58:20.021876Z", "iopub.execute_input": "2021-07-26T06:58:20.022255Z", "iopub.status.idle": "2021-07-26T06:58:20.126852Z", "shell.execute_reply.started": "2021-07-26T06:58:20.022220Z", "shell.execute_reply": "2021-07-26T06:58:20.125776Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "#Target Analizi\ndf[\"SalePrice\"].describe([0.05, 0.10, 0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.99]).T", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:58:31.964979Z", "iopub.execute_input": "2021-07-26T06:58:31.965319Z", "iopub.status.idle": "2021-07-26T06:58:31.975889Z", "shell.execute_reply.started": "2021-07-26T06:58:31.965287Z", "shell.execute_reply": "2021-07-26T06:58:31.974986Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "def find_correlation(dataframe, numeric_cols, corr_limit=0.60):\n high_correlations = []\n low_correlations = []\n for col in numeric_cols:\n if col == \"SalePrice\":\n pass\n else:\n correlation = dataframe[[col, \"SalePrice\"]].corr().loc[col, \"SalePrice\"]\n print(col, correlation)\n if abs(correlation) > corr_limit:\n high_correlations.append(col + \": \" + str(correlation))\n else:\n low_correlations.append(col + \": \" + str(correlation))\n return low_correlations, high_correlations\n\nlow_corrs, high_corrs = find_correlation(df, num_cols)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:58:36.492891Z", "iopub.execute_input": "2021-07-26T06:58:36.493252Z", "iopub.status.idle": "2021-07-26T06:58:36.550872Z", "shell.execute_reply.started": "2021-07-26T06:58:36.493219Z", "shell.execute_reply": "2021-07-26T06:58:36.549920Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# t\u00fcm de\u011fi\u015fkenler korelasyon\ncorr_matrix = df.corr()\nsns.clustermap(corr_matrix, annot = True, figsize=(20,15), fmt=\".2f\" )\nplt.title(\"Correlation Between Features\")\nplt.show()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:59:02.379604Z", "iopub.execute_input": "2021-07-26T06:59:02.379964Z", "iopub.status.idle": "2021-07-26T06:59:09.527020Z", "shell.execute_reply.started": "2021-07-26T06:59:02.379931Z", "shell.execute_reply": "2021-07-26T06:59:09.526129Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "threshold = 0.60\nfilter = np.abs(corr_matrix[\"SalePrice\"]) > threshold\ncorr_features = corr_matrix.columns[filter].tolist()\nsns.clustermap(df[corr_features].corr(), annot = True, fmt = \".2f\")\nplt.title(\"Correlation Between Features w/ Corr Threshold 
0.60)\")\nplt.show()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:59:27.360125Z", "iopub.execute_input": "2021-07-26T06:59:27.360615Z", "iopub.status.idle": "2021-07-26T06:59:28.267316Z", "shell.execute_reply.started": "2021-07-26T06:59:27.360584Z", "shell.execute_reply": "2021-07-26T06:59:28.266353Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "def high_correlated_cols(dataframe, plot=False, corr_th=0.60):\n corr = dataframe.corr()\n cor_matrix = corr.abs()\n upper_triangle_matrix = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))\n drop_list = [col for col in upper_triangle_matrix.columns if any(upper_triangle_matrix[col] > corr_th)]\n if plot:\n import seaborn as sns\n import matplotlib.pyplot as plt\n sns.set(rc={'figure.figsize': (15, 15)})\n sns.heatmap(corr, cmap=\"RdBu\")\n plt.show()\n return drop_list\n\nhigh_correlated_cols(df)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:59:56.883946Z", "iopub.execute_input": "2021-07-26T06:59:56.884320Z", "iopub.status.idle": "2021-07-26T06:59:56.917436Z", "shell.execute_reply.started": "2021-07-26T06:59:56.884282Z", "shell.execute_reply": "2021-07-26T06:59:56.916392Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# FEATURE ENGINEERING\n\ndf[\"SqFtPerRoom\"] = df[\"GrLivArea\"] / (df[\"TotRmsAbvGrd\"] +\n df[\"FullBath\"] +\n df[\"HalfBath\"] +\n df[\"KitchenAbvGr\"])\n\ndf['Total_Home_Quality'] = df['OverallQual'] + df['OverallCond']\n\ndf['Total_Bathrooms'] = (df['FullBath'] + (0.5 * df['HalfBath']) +\n df['BsmtFullBath'] + (0.5 * df['BsmtHalfBath']))\n\ndf[\"HighQualSF\"] = df[\"1stFlrSF\"] + df[\"2ndFlrSF\"]", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:02:03.707537Z", "iopub.execute_input": "2021-07-26T07:02:03.707893Z", "iopub.status.idle": "2021-07-26T07:02:03.719957Z", "shell.execute_reply.started": "2021-07-26T07:02:03.707859Z", "shell.execute_reply": "2021-07-26T07:02:03.718835Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Converting non-numeric predictors stored as numbers into string\n\ndf['MSSubClass'] = df['MSSubClass'].apply(str)\ndf['YrSold'] = df['YrSold'].apply(str)\ndf['MoSold'] = df['MoSold'].apply(str)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:02:36.670815Z", "iopub.execute_input": "2021-07-26T07:02:36.671180Z", "iopub.status.idle": "2021-07-26T07:02:36.682970Z", "shell.execute_reply.started": "2021-07-26T07:02:36.671150Z", "shell.execute_reply": "2021-07-26T07:02:36.681974Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# RARE ENCODING\ndef rare_encoder(dataframe, rare_perc, cat_cols):\n rare_columns = [col for col in cat_cols if (dataframe[col].value_counts() / len(dataframe) < 0.01).sum()>1]\n for col in rare_columns:\n tmp = dataframe[col].value_counts() / len(dataframe)\n rare_labels = tmp[tmp < rare_perc].index\n dataframe[col] = np.where(dataframe[col].isin(rare_labels), 'Rare', dataframe[col])\n\n return dataframe\n\ndef rare_analyser(dataframe, target, cat_cols):\n for col in cat_cols:\n print(col, \":\", len(dataframe[col].value_counts()))\n print(pd.DataFrame({\"COUNT\": dataframe[col].value_counts(),\n \"RATIO\": dataframe[col].value_counts() / len(dataframe),\n \"TARGET_MEAN\": dataframe.groupby(col)[target].mean()}), end=\"\\n\\n\\n\")\n\nrare_analyser(df, \"SalePrice\", cat_cols)", "metadata": {"execution": 
{"iopub.status.busy": "2021-07-26T07:05:31.892893Z", "iopub.execute_input": "2021-07-26T07:05:31.893256Z", "iopub.status.idle": "2021-07-26T07:05:32.320672Z", "shell.execute_reply.started": "2021-07-26T07:05:31.893226Z", "shell.execute_reply": "2021-07-26T07:05:32.319749Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df = rare_encoder(df, 0.01, cat_cols)\n\ndrop_list = [\"Street\", \"SaleCondition\", \"Functional\", \"Condition2\", \"Utilities\", \"SaleType\", \"MiscVal\",\n \"Alley\", \"LandSlope\", \"PoolQC\", \"MiscFeature\", \"Electrical\", \"Fence\", \"RoofStyle\", \"RoofMatl\",\n \"FireplaceQu\"]\n\ncat_cols = [col for col in cat_cols if col not in drop_list]\n\nfor col in drop_list:\n df.drop(col, axis=1, inplace=True)\n\nrare_analyser(df, \"SalePrice\", cat_cols)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:06:02.313799Z", "iopub.execute_input": "2021-07-26T07:06:02.314159Z", "iopub.status.idle": "2021-07-26T07:06:02.819390Z", "shell.execute_reply.started": "2021-07-26T07:06:02.314123Z", "shell.execute_reply": "2021-07-26T07:06:02.818445Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "useless_cols = [col for col in cat_cols if df[col].nunique() == 1 or\n (df[col].nunique() == 2 and (df[col].value_counts() / len(df) <= 0.01).any(axis=None))]\n\ncat_cols = [col for col in cat_cols if col not in useless_cols]\n\n\nfor col in useless_cols:\n df.drop(col, axis=1, inplace=True)\n\nrare_analyser(df, \"SalePrice\", cat_cols)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:06:26.808903Z", "iopub.execute_input": "2021-07-26T07:06:26.809294Z", "iopub.status.idle": "2021-07-26T07:06:27.183654Z", "shell.execute_reply.started": "2021-07-26T07:06:26.809256Z", "shell.execute_reply": "2021-07-26T07:06:27.182657Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Label Encoding & ONE-HOT ENCODING\n\ndef one_hot_encoder(dataframe, categorical_cols, drop_first=False):\n dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)\n return dataframe\n\ncat_cols, num_cols, cat_but_car = grab_col_names(df)\ncat_cols = cat_cols + cat_but_car\ndf = one_hot_encoder(df, cat_cols, drop_first=True)\n\ncheck_df(df)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:07:30.109257Z", "iopub.execute_input": "2021-07-26T07:07:30.109639Z", "iopub.status.idle": "2021-07-26T07:07:30.295048Z", "shell.execute_reply.started": "2021-07-26T07:07:30.109602Z", "shell.execute_reply": "2021-07-26T07:07:30.293949Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "cat_cols, num_cols, cat_but_car = grab_col_names(df)\n\nrare_analyser(df, \"SalePrice\", cat_cols)\n\nuseless_cols_new = [col for col in cat_cols if (df[col].value_counts() / len(df) <= 0.01).any(axis=None)]\n\ndf[useless_cols_new].head()\n\nfor col in useless_cols_new:\n cat_summary(df, col)\n\nrare_analyser(df, \"SalePrice\", useless_cols_new)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:08:29.870944Z", "iopub.execute_input": "2021-07-26T07:08:29.871300Z", "iopub.status.idle": "2021-07-26T07:08:31.333836Z", "shell.execute_reply.started": "2021-07-26T07:08:29.871270Z", "shell.execute_reply": "2021-07-26T07:08:31.332985Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Missing Values\n\ndef missing_values_table(dataframe, na_name=False):\n 
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]\n n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)\n ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)\n missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])\n print(missing_df, end=\"\\n\")\n if na_name:\n return na_columns\n\nmissing_values_table(df)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:09:27.315445Z", "iopub.execute_input": "2021-07-26T07:09:27.315809Z", "iopub.status.idle": "2021-07-26T07:09:27.369687Z", "shell.execute_reply.started": "2021-07-26T07:09:27.315782Z", "shell.execute_reply": "2021-07-26T07:09:27.368795Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "test.shape", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:09:46.826086Z", "iopub.execute_input": "2021-07-26T07:09:46.826583Z", "iopub.status.idle": "2021-07-26T07:09:46.831188Z", "shell.execute_reply.started": "2021-07-26T07:09:46.826549Z", "shell.execute_reply": "2021-07-26T07:09:46.830529Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "missing_values_table(train)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:09:51.349471Z", "iopub.execute_input": "2021-07-26T07:09:51.349805Z", "iopub.status.idle": "2021-07-26T07:09:51.395489Z", "shell.execute_reply.started": "2021-07-26T07:09:51.349776Z", "shell.execute_reply": "2021-07-26T07:09:51.394485Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "na_cols = [col for col in df.columns if df[col].isnull().sum() > 0 and \"SalePrice\" not in col]\n\ndf[na_cols] = df[na_cols].apply(lambda x: x.fillna(x.median()), axis=0)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:10:06.878568Z", "iopub.execute_input": "2021-07-26T07:10:06.878919Z", "iopub.status.idle": "2021-07-26T07:10:06.923589Z", "shell.execute_reply.started": "2021-07-26T07:10:06.878890Z", "shell.execute_reply": "2021-07-26T07:10:06.922717Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Outliers\ndef outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):\n quartile1 = dataframe[col_name].quantile(q1)\n quartile3 = dataframe[col_name].quantile(q3)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit\n\ndef check_outlier(dataframe, col_name, q1=0.25, q3=0.75):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)\n if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):\n return True\n else:\n return False\n\nfor col in num_cols:\n print(col, check_outlier(df, col, q1=0.01, q3=0.99))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:10:49.527070Z", "iopub.execute_input": "2021-07-26T07:10:49.527462Z", "iopub.status.idle": "2021-07-26T07:10:49.675128Z", "shell.execute_reply.started": "2021-07-26T07:10:49.527430Z", "shell.execute_reply": "2021-07-26T07:10:49.674256Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Model\n\ndf.shape", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:11:13.414822Z", "iopub.execute_input": "2021-07-26T07:11:13.415182Z", "iopub.status.idle": "2021-07-26T07:11:13.421531Z", 
"shell.execute_reply.started": "2021-07-26T07:11:13.415147Z", "shell.execute_reply": "2021-07-26T07:11:13.420528Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train_df = df[df['SalePrice'].notnull()]\ntest_df = df[df['SalePrice'].isnull()].drop(\"SalePrice\", axis=1)\n\ntrain_df.shape", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:11:50.314513Z", "iopub.execute_input": "2021-07-26T07:11:50.314833Z", "iopub.status.idle": "2021-07-26T07:11:50.328747Z", "shell.execute_reply.started": "2021-07-26T07:11:50.314805Z", "shell.execute_reply": "2021-07-26T07:11:50.327828Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "test_df.shape", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:11:54.769943Z", "iopub.execute_input": "2021-07-26T07:11:54.770306Z", "iopub.status.idle": "2021-07-26T07:11:54.776252Z", "shell.execute_reply.started": "2021-07-26T07:11:54.770272Z", "shell.execute_reply": "2021-07-26T07:11:54.775353Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "y = np.log1p(train_df['SalePrice'])\nX = train_df.drop([\"Id\", \"SalePrice\"], axis=1)\n\nX.shape", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:12:10.061473Z", "iopub.execute_input": "2021-07-26T07:12:10.061803Z", "iopub.status.idle": "2021-07-26T07:12:10.070561Z", "shell.execute_reply.started": "2021-07-26T07:12:10.061773Z", "shell.execute_reply": "2021-07-26T07:12:10.069801Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Base Models\n##################\n\nmodels = [('LR', LinearRegression()),\n ('CART', DecisionTreeRegressor()),\n ('RF', RandomForestRegressor()),\n ('GBM', GradientBoostingRegressor()),\n (\"XGBoost\", XGBRegressor(objective='reg:squarederror')),\n (\"LightGBM\", LGBMRegressor())]\n\nfor name, regressor in models:\n rmse = np.mean(np.sqrt(-cross_val_score(regressor, X, y, cv=3, scoring=\"neg_mean_squared_error\")))\n print(f\"RMSE: {round(rmse, 4)} ({name}) \")", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:13:34.179975Z", "iopub.execute_input": "2021-07-26T07:13:34.180513Z", "iopub.status.idle": "2021-07-26T07:13:46.021434Z", "shell.execute_reply.started": "2021-07-26T07:13:34.180464Z", "shell.execute_reply": "2021-07-26T07:13:46.020597Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "**Hyperparameter Optimization**", "metadata": {}}, {"cell_type": "code", "source": "lgbm_model = LGBMRegressor(random_state=46)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:14:00.718147Z", "iopub.execute_input": "2021-07-26T07:14:00.718708Z", "iopub.status.idle": "2021-07-26T07:14:00.723453Z", "shell.execute_reply.started": "2021-07-26T07:14:00.718659Z", "shell.execute_reply": "2021-07-26T07:14:00.722553Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# modelleme \u00f6ncesi hata:\nrmse = np.mean(np.sqrt(-cross_val_score(lgbm_model,\n X, y, cv=10, scoring=\"neg_mean_squared_error\")))\nrmse", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:14:09.193977Z", "iopub.execute_input": "2021-07-26T07:14:09.194543Z", "iopub.status.idle": "2021-07-26T07:14:11.512067Z", "shell.execute_reply.started": "2021-07-26T07:14:09.194491Z", "shell.execute_reply": "2021-07-26T07:14:11.511095Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", 
"source": "lgbm_params = {\"learning_rate\": [0.01, 0.005],\n \"n_estimators\": [15000, 20000],\n \"colsample_bytree\": [0.5, 0.3]}", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:14:24.944162Z", "iopub.execute_input": "2021-07-26T07:14:24.944693Z", "iopub.status.idle": "2021-07-26T07:14:24.949452Z", "shell.execute_reply.started": "2021-07-26T07:14:24.944648Z", "shell.execute_reply": "2021-07-26T07:14:24.948745Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "lgbm_gs_best = GridSearchCV(lgbm_model,\n lgbm_params,\n cv=10,\n n_jobs=-1,\n verbose=False).fit(X, y)\n\nfinal_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(X, y)\n\nrmse = np.mean(np.sqrt(-cross_val_score(final_model, X, y, cv=10, scoring=\"neg_mean_squared_error\")))\nprint(rmse)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:19:14.896357Z", "iopub.execute_input": "2021-07-26T07:19:14.896815Z", "iopub.status.idle": "2021-07-26T07:44:12.041688Z", "shell.execute_reply.started": "2021-07-26T07:19:14.896767Z", "shell.execute_reply": "2021-07-26T07:44:12.040644Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "#hiperparametrelerin default kendi de\u011feriyle rmse 0.1305858 idi.\n#optimizasyonlarla 0.12328 e indirdik", "metadata": {"execution": {"iopub.status.busy": "2021-07-25T16:00:22.702529Z", "iopub.execute_input": "2021-07-25T16:00:22.703125Z", "iopub.status.idle": "2021-07-25T16:00:23.905529Z", "shell.execute_reply.started": "2021-07-25T16:00:22.70308Z", "shell.execute_reply": "2021-07-25T16:00:23.902669Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Feature Selection\n\ndef plot_importance(model, features, num=len(X), save=False):\n feature_imp = pd.DataFrame({'Value': model.feature_importances_, 'Feature': features.columns})\n plt.figure(figsize=(10, 10))\n sns.set(font_scale=1)\n sns.barplot(x=\"Value\", y=\"Feature\", data=feature_imp.sort_values(by=\"Value\",\n ascending=False)[0:num])\n plt.title('Features')\n plt.tight_layout()\n plt.show()\n if save:\n plt.savefig('importances.png')\n\nplot_importance(final_model, X, 20)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:46:11.876847Z", "iopub.execute_input": "2021-07-26T07:46:11.877246Z", "iopub.status.idle": "2021-07-26T07:46:12.334814Z", "shell.execute_reply.started": "2021-07-26T07:46:11.877206Z", "shell.execute_reply": "2021-07-26T07:46:12.333696Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "X.shape", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:46:24.148261Z", "iopub.execute_input": "2021-07-26T07:46:24.148627Z", "iopub.status.idle": "2021-07-26T07:46:24.154697Z", "shell.execute_reply.started": "2021-07-26T07:46:24.148598Z", "shell.execute_reply": "2021-07-26T07:46:24.153512Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "feature_imp = pd.DataFrame({'Value': final_model.feature_importances_, 'Feature': X.columns})\n\ndef num_summary(dataframe, numerical_col, plot=False):\n quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]\n print(dataframe[numerical_col].describe(quantiles).T)\n\nnum_summary(feature_imp, \"Value\", True)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:46:31.518297Z", "iopub.execute_input": "2021-07-26T07:46:31.518648Z", "iopub.status.idle": "2021-07-26T07:46:31.535933Z", 
"shell.execute_reply.started": "2021-07-26T07:46:31.518618Z", "shell.execute_reply": "2021-07-26T07:46:31.534890Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "feature_imp[feature_imp[\"Value\"] > 0].shape\n\nfeature_imp[feature_imp[\"Value\"] < 1].shape\n\nzero_imp_cols = feature_imp[feature_imp[\"Value\"] < 1][\"Feature\"].values\n\nselected_cols = [col for col in X.columns if col not in zero_imp_cols]", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:46:35.617168Z", "iopub.execute_input": "2021-07-26T07:46:35.617681Z", "iopub.status.idle": "2021-07-26T07:46:35.631673Z", "shell.execute_reply.started": "2021-07-26T07:46:35.617633Z", "shell.execute_reply": "2021-07-26T07:46:35.630346Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# Hyperparameter Optimization with Selected Features\n\nlgbm_model = LGBMRegressor(random_state=46)\n\nlgbm_params = {\"learning_rate\": [0.01, 0.005],\n \"n_estimators\": [15000, 20000],\n \"colsample_bytree\": [0.5, 0.3]}\n\nlgbm_gs_best = GridSearchCV(lgbm_model,\n lgbm_params,\n cv=10,\n n_jobs=-1,\n verbose=True).fit(X[selected_cols], y)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:09:25.510782Z", "iopub.execute_input": "2021-07-26T08:09:25.511393Z", "iopub.status.idle": "2021-07-26T08:27:19.010522Z", "shell.execute_reply.started": "2021-07-26T08:09:25.511356Z", "shell.execute_reply": "2021-07-26T08:27:19.009468Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "y = np.log1p(train_df['SalePrice'])\nX = train_df.drop([\"Id\", \"SalePrice\"], axis=1)\n\nfinal_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(X[selected_cols], y)\n\nrmse = np.mean(np.sqrt(-cross_val_score(final_model, X[selected_cols], y, cv=10, scoring=\"neg_mean_squared_error\")))\nprint(rmse)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:27:47.212549Z", "iopub.execute_input": "2021-07-26T08:27:47.212925Z", "iopub.status.idle": "2021-07-26T08:33:44.017267Z", "shell.execute_reply.started": "2021-07-26T08:27:47.212889Z", "shell.execute_reply": "2021-07-26T08:33:44.016413Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# SONUCLARIN YUKLENMESI\n#######################################\n\nsubmission_df = pd.DataFrame()\n\nsubmission_df['Id'] = test_df[\"Id\"].astype(\"Int32\")\nsubmission_df.head()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:36:12.662294Z", "iopub.execute_input": "2021-07-26T08:36:12.662838Z", "iopub.status.idle": "2021-07-26T08:36:12.679743Z", "shell.execute_reply.started": "2021-07-26T08:36:12.662791Z", "shell.execute_reply": "2021-07-26T08:36:12.678766Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "y_pred_sub = final_model.predict(test_df[selected_cols])\ntest_df.head()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:36:18.126420Z", "iopub.execute_input": "2021-07-26T08:36:18.126817Z", "iopub.status.idle": "2021-07-26T08:36:20.634078Z", "shell.execute_reply.started": "2021-07-26T08:36:18.126784Z", "shell.execute_reply": "2021-07-26T08:36:20.633118Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "y_pred_sub = np.expm1(y_pred_sub)\n\nsubmission_df['SalePrice'] = y_pred_sub\n\nsubmission_df.to_csv('submission.csv', index=False)", "metadata": {"execution": {"iopub.status.busy": 
"2021-07-26T08:36:32.464179Z", "iopub.execute_input": "2021-07-26T08:36:32.464757Z", "iopub.status.idle": "2021-07-26T08:36:32.477098Z", "shell.execute_reply.started": "2021-07-26T08:36:32.464722Z", "shell.execute_reply": "2021-07-26T08:36:32.476183Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "submission_df", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:25.817924Z", "iopub.execute_input": "2021-07-26T08:38:25.818296Z", "iopub.status.idle": "2021-07-26T08:38:25.832790Z", "shell.execute_reply.started": "2021-07-26T08:38:25.818266Z", "shell.execute_reply": "2021-07-26T08:38:25.831646Z"}, "trusted": true}, "execution_count": null, "outputs": []}] | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046436.ipynb | null | null | [{"Id": 69046436, "ScriptId": 18841428, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6947038, "CreationDate": "07/26/2021 08:40:25", "VersionNumber": 1.0, "Title": "HousePricePrediction", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 411.0, "LinesInsertedFromPrevious": 265.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 146.0, "LinesInsertedFromFork": 265.0, "LinesDeletedFromFork": 632.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 146.0, "TotalVotes": 4}] | null | null | null | null | import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.ensemble import (
RandomForestRegressor,
GradientBoostingRegressor,
ExtraTreesRegressor,
)
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import seaborn as sns
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
print(train.shape)
print(test.shape)
df = pd.concat([train, test]).reset_index(drop=True)  # DataFrame.append was removed in pandas 2.0
print(df.shape)
df.columns
def check_df(dataframe, head=5):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(head))
print("##################### Tail #####################")
print(dataframe.tail(head))
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
    print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1], numeric_only=True).T)
check_df(df)
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
df["Neighborhood"].value_counts()
# Categorical Variable Analysis
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("##########################################")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show()
for col in cat_cols:
cat_summary(df, col)
for col in cat_but_car:
cat_summary(df, col)
# Numerical Variable Analysis
df[num_cols].describe([0.10, 0.30, 0.50, 0.70, 0.80, 0.99]).T
# Target Analysis
df["SalePrice"].describe([0.05, 0.10, 0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.99]).T
def find_correlation(dataframe, numeric_cols, corr_limit=0.60):
high_correlations = []
low_correlations = []
for col in numeric_cols:
if col == "SalePrice":
pass
else:
correlation = dataframe[[col, "SalePrice"]].corr().loc[col, "SalePrice"]
print(col, correlation)
if abs(correlation) > corr_limit:
high_correlations.append(col + ": " + str(correlation))
else:
low_correlations.append(col + ": " + str(correlation))
return low_correlations, high_correlations
low_corrs, high_corrs = find_correlation(df, num_cols)
# correlation across all variables
corr_matrix = df.corr(numeric_only=True)
sns.clustermap(corr_matrix, annot=True, figsize=(20, 15), fmt=".2f")
plt.title("Correlation Between Features")
plt.show()
threshold = 0.60
filter = np.abs(corr_matrix["SalePrice"]) > threshold
corr_features = corr_matrix.columns[filter].tolist()
sns.clustermap(df[corr_features].corr(), annot=True, fmt=".2f")
plt.title("Correlation Between Features w/ Corr Threshold 0.60)")
plt.show()
def high_correlated_cols(dataframe, plot=False, corr_th=0.60):
    corr = dataframe.corr(numeric_only=True)
cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(
        np.triu(np.ones(cor_matrix.shape), k=1).astype(bool)
    )
drop_list = [
col
for col in upper_triangle_matrix.columns
if any(upper_triangle_matrix[col] > corr_th)
]
if plot:
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(rc={"figure.figsize": (15, 15)})
sns.heatmap(corr, cmap="RdBu")
plt.show()
return drop_list
high_correlated_cols(df)
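# high_correlated_cols only reports the candidate columns; dropping them is a
# separate, optional step that this run does not take. A hedged sketch of how
# it would be applied:
# corr_drop_list = high_correlated_cols(df)
# df = df.drop(corr_drop_list, axis=1)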
# FEATURE ENGINEERING
df["SqFtPerRoom"] = df["GrLivArea"] / (
df["TotRmsAbvGrd"] + df["FullBath"] + df["HalfBath"] + df["KitchenAbvGr"]
)
df["Total_Home_Quality"] = df["OverallQual"] + df["OverallCond"]
df["Total_Bathrooms"] = (
df["FullBath"]
+ (0.5 * df["HalfBath"])
+ df["BsmtFullBath"]
+ (0.5 * df["BsmtHalfBath"])
)
df["HighQualSF"] = df["1stFlrSF"] + df["2ndFlrSF"]
# Converting non-numeric predictors stored as numbers into string
df["MSSubClass"] = df["MSSubClass"].apply(str)
df["YrSold"] = df["YrSold"].apply(str)
df["MoSold"] = df["MoSold"].apply(str)
# RARE ENCODING
def rare_encoder(dataframe, rare_perc, cat_cols):
    # use the rare_perc argument rather than a hardcoded 0.01
    rare_columns = [
        col
        for col in cat_cols
        if (dataframe[col].value_counts() / len(dataframe) < rare_perc).sum() > 1
    ]
for col in rare_columns:
tmp = dataframe[col].value_counts() / len(dataframe)
rare_labels = tmp[tmp < rare_perc].index
dataframe[col] = np.where(
dataframe[col].isin(rare_labels), "Rare", dataframe[col]
)
return dataframe
def rare_analyser(dataframe, target, cat_cols):
for col in cat_cols:
print(col, ":", len(dataframe[col].value_counts()))
print(
pd.DataFrame(
{
"COUNT": dataframe[col].value_counts(),
"RATIO": dataframe[col].value_counts() / len(dataframe),
"TARGET_MEAN": dataframe.groupby(col)[target].mean(),
}
),
end="\n\n\n",
)
rare_analyser(df, "SalePrice", cat_cols)
df = rare_encoder(df, 0.01, cat_cols)
drop_list = [
"Street",
"SaleCondition",
"Functional",
"Condition2",
"Utilities",
"SaleType",
"MiscVal",
"Alley",
"LandSlope",
"PoolQC",
"MiscFeature",
"Electrical",
"Fence",
"RoofStyle",
"RoofMatl",
"FireplaceQu",
]
cat_cols = [col for col in cat_cols if col not in drop_list]
for col in drop_list:
df.drop(col, axis=1, inplace=True)
rare_analyser(df, "SalePrice", cat_cols)
useless_cols = [
col
for col in cat_cols
if df[col].nunique() == 1
or (
df[col].nunique() == 2
and (df[col].value_counts() / len(df) <= 0.01).any(axis=None)
)
]
cat_cols = [col for col in cat_cols if col not in useless_cols]
for col in useless_cols:
df.drop(col, axis=1, inplace=True)
rare_analyser(df, "SalePrice", cat_cols)
# Label Encoding & ONE-HOT ENCODING
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
cat_cols, num_cols, cat_but_car = grab_col_names(df)
cat_cols = cat_cols + cat_but_car
df = one_hot_encoder(df, cat_cols, drop_first=True)
check_df(df)
cat_cols, num_cols, cat_but_car = grab_col_names(df)
rare_analyser(df, "SalePrice", cat_cols)
useless_cols_new = [
col for col in cat_cols if (df[col].value_counts() / len(df) <= 0.01).any(axis=None)
]
df[useless_cols_new].head()
for col in useless_cols_new:
cat_summary(df, col)
rare_analyser(df, "SalePrice", useless_cols_new)
# Missing Values
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
missing_values_table(df)
test.shape
missing_values_table(train)
na_cols = [
col for col in df.columns if df[col].isnull().sum() > 0 and "SalePrice" not in col
]
df[na_cols] = df[na_cols].apply(lambda x: x.fillna(x.median()), axis=0)
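# KNNImputer is imported at the top but never used; a hedged alternative to the
# per-column median fill above (a sketch, not what this run applies):
# imputer = KNNImputer(n_neighbors=5)
# df[na_cols] = imputer.fit_transform(df[na_cols])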
# Outliers
def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(col, check_outlier(df, col, q1=0.01, q3=0.99))
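# check_outlier only reports whether a column has outliers; a common follow-up
# is to cap values at the computed limits. A minimal winsorizing sketch, left
# unapplied so the run above stays unchanged:
def replace_with_thresholds(dataframe, col_name, q1=0.01, q3=0.99):
    low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
    dataframe.loc[dataframe[col_name] < low_limit, col_name] = low_limit
    dataframe.loc[dataframe[col_name] > up_limit, col_name] = up_limit

# for col in num_cols:
#     replace_with_thresholds(df, col)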
# Model
df.shape
train_df = df[df["SalePrice"].notnull()]
test_df = df[df["SalePrice"].isnull()].drop("SalePrice", axis=1)
train_df.shape
test_df.shape
y = np.log1p(train_df["SalePrice"])
X = train_df.drop(["Id", "SalePrice"], axis=1)
X.shape
# Base Models
##################
models = [
("LR", LinearRegression()),
("CART", DecisionTreeRegressor()),
("RF", RandomForestRegressor()),
("GBM", GradientBoostingRegressor()),
("XGBoost", XGBRegressor(objective="reg:squarederror")),
("LightGBM", LGBMRegressor()),
]
for name, regressor in models:
rmse = np.mean(
np.sqrt(
-cross_val_score(regressor, X, y, cv=3, scoring="neg_mean_squared_error")
)
)
print(f"RMSE: {round(rmse, 4)} ({name}) ")
# **Hyperparameter Optimization**
lgbm_model = LGBMRegressor(random_state=46)
# error before hyperparameter tuning:
rmse = np.mean(
np.sqrt(-cross_val_score(lgbm_model, X, y, cv=10, scoring="neg_mean_squared_error"))
)
rmse
lgbm_params = {
"learning_rate": [0.01, 0.005],
"n_estimators": [15000, 20000],
"colsample_bytree": [0.5, 0.3],
}
lgbm_gs_best = GridSearchCV(
lgbm_model, lgbm_params, cv=10, n_jobs=-1, verbose=False
).fit(X, y)
final_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(X, y)
rmse = np.mean(
np.sqrt(
-cross_val_score(final_model, X, y, cv=10, scoring="neg_mean_squared_error")
)
)
print(rmse)
# With the hyperparameters at their default values, RMSE was 0.1305858;
# tuning brought it down to 0.12328.
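# With 15,000-20,000 estimators each CV fit above is slow; a hedged alternative
# (a sketch, assuming lightgbm >= 3.3) is to pick n_estimators by early
# stopping on a held-out split instead of grid-searching it:
# from lightgbm import early_stopping
# X_tr, X_vl, y_tr, y_vl = train_test_split(X, y, test_size=0.2, random_state=46)
# es_model = LGBMRegressor(n_estimators=20000, learning_rate=0.01)
# es_model.fit(X_tr, y_tr, eval_set=[(X_vl, y_vl)], callbacks=[early_stopping(200)])
# print("Best iteration:", es_model.best_iteration_)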
# Feature Selection
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
plt.show()
if save:
plt.savefig("importances.png")
plot_importance(final_model, X, 20)
X.shape
feature_imp = pd.DataFrame(
{"Value": final_model.feature_importances_, "Feature": X.columns}
)
def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:
        dataframe[numerical_col].hist(bins=50)
        plt.xlabel(numerical_col)
        plt.show()
num_summary(feature_imp, "Value", True)
feature_imp[feature_imp["Value"] > 0].shape
feature_imp[feature_imp["Value"] < 1].shape
# features with importance below 1 are effectively unused by the model
zero_imp_cols = feature_imp[feature_imp["Value"] < 1]["Feature"].values
selected_cols = [col for col in X.columns if col not in zero_imp_cols]
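# A quick hedged check (not in the original run) of how aggressive the pruning is:
# print(f"{len(selected_cols)} of {X.shape[1]} features kept")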
# Hyperparameter Optimization with Selected Features
lgbm_model = LGBMRegressor(random_state=46)
lgbm_params = {
"learning_rate": [0.01, 0.005],
"n_estimators": [15000, 20000],
"colsample_bytree": [0.5, 0.3],
}
lgbm_gs_best = GridSearchCV(
lgbm_model, lgbm_params, cv=10, n_jobs=-1, verbose=True
).fit(X[selected_cols], y)
y = np.log1p(train_df["SalePrice"])
X = train_df.drop(["Id", "SalePrice"], axis=1)
final_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(
X[selected_cols], y
)
rmse = np.mean(
np.sqrt(
-cross_val_score(
final_model, X[selected_cols], y, cv=10, scoring="neg_mean_squared_error"
)
)
)
print(rmse)
# PREPARING THE SUBMISSION
#######################################
submission_df = pd.DataFrame()
submission_df["Id"] = test_df["Id"].astype("Int32")
submission_df.head()
y_pred_sub = final_model.predict(test_df[selected_cols])
test_df.head()
y_pred_sub = np.expm1(y_pred_sub)
submission_df["SalePrice"] = y_pred_sub
submission_df.to_csv("submission.csv", index=False)
submission_df
|
69046416 | [{"cell_type": "markdown", "source": "# King County Houses Prices: \n## Neigborhoods Classification\n<p>\nIn this notebook, I used an other dataset (SEA Building Energy Benchmarking (Source bellow)) which give us for each building GPS coords and the neighborhood (North, East, Ballard, Delridge, etc) .<br>\n I cleaned the dataset as part of a project for a data scientist training and got the idea using this to classify each King County Houses using a KNN classifier.<br>\n <br>\n It will maybe help improving algorithm performances for predicting house prices. <br>\n <br>\n <b>Results at the bottom of the notebook\n\n### Importations", "metadata": {}}, {"cell_type": "code", "source": "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\nsns.set()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.456046Z", "iopub.execute_input": "2021-07-26T08:38:43.456424Z", "iopub.status.idle": "2021-07-26T08:38:43.462760Z", "shell.execute_reply.started": "2021-07-26T08:38:43.456389Z", "shell.execute_reply": "2021-07-26T08:38:43.461446Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "data = pd.read_csv(\"../input/housesalesprediction/kc_house_data.csv\")", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.501435Z", "iopub.execute_input": "2021-07-26T08:38:43.501833Z", "iopub.status.idle": "2021-07-26T08:38:43.570957Z", "shell.execute_reply.started": "2021-07-26T08:38:43.501799Z", "shell.execute_reply": "2021-07-26T08:38:43.569886Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Exploratory Functions", "metadata": {}}, {"cell_type": "code", "source": "def describe_columns(df):\n desc_df = pd.DataFrame(index=df.columns, columns=['NaN count', 'NaN frequency (%)', 'Number of unique values'])\n desc_df['NaN count'] = df.isna().sum()\n desc_df['NaN frequency (%)'] = desc_df['NaN count']/df.shape[0]*100\n for column in df.columns:\n desc_df['Number of unique values'][column] = len(df[column].dropna().unique())\n return desc_df\n\ndef move_column(df, column_name, column_place):\n mvd_column = df.pop(column_name)\n df.insert(column_place, column_name, mvd_column)\n return df\n\ndef prop_nan(df):\n return (df.isna()).sum().sum()/df.size\n\ndef nan_map(df, save=False, filename='nan_location'):\n plt.figure(figsize=(20,10))\n sns.heatmap(df.isna())\n if save:\n plt.savefig(filename)\n \ndef corr_matrix(df, figsize=(30,20), maptype='heatmap', absolute=False, crit_value=None,\n annot=True, save=False, filename='corr_matrix'):\n \n matrix_corr = df.corr()\n \n if absolute:\n matrix_corr = matrix_corr.abs()\n if crit_value != None:\n matrix_corr = matrix_corr >= crit_value\n plt.figure(figsize=figsize)\n if maptype=='heatmap':\n sns.heatmap(matrix_corr, annot=annot)\n elif maptype=='clustermap':\n sns.clustermap(matrix_corr, annot=annot)\n \n \n if save:\n plt.savefig(filename)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.572660Z", "iopub.execute_input": "2021-07-26T08:38:43.572976Z", "iopub.status.idle": "2021-07-26T08:38:43.586682Z", "shell.execute_reply.started": "2021-07-26T08:38:43.572947Z", "shell.execute_reply": "2021-07-26T08:38:43.585486Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "df = data.copy()", "metadata": {"execution": {"iopub.status.busy": 
"2021-07-26T08:38:43.588560Z", "iopub.execute_input": "2021-07-26T08:38:43.588875Z", "iopub.status.idle": "2021-07-26T08:38:43.599454Z", "shell.execute_reply.started": "2021-07-26T08:38:43.588845Z", "shell.execute_reply": "2021-07-26T08:38:43.598578Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Columns descriptions\n\n<p>\n<b>id</b> - Unique ID for each home sold<br>\n<b>date</b> - Date of the home sale<br>\n<b>price</b> - Price of each home sold<br>\n<b>bedrooms</b> - Number of bedrooms<br>\n<b>bathrooms</b> - Number of bathrooms, where .5 accounts for a room with a toilet but no shower<br>\n<b>sqft_living</b> - Square footage of the apartments interior living space<br>\n<b>sqft_lot</b> - Square footage of the land space<br>\n<b>floors</b> - Number of floors<br>\n<b>waterfront</b> - A dummy variable for whether the apartment was overlooking the waterfront or not<br>\n<b>view</b> - An index from 0 to 4 of how good the view of the property was<br>\n<b>condition</b> - An index from 1 to 5 on the condition of the apartment,<br>\n<b>grade</b> - An index from 1 to 13, where 1-3 falls short of building construction and design, 7 has an average level of construction and design, and 11-13 have a high quality level of construction and design.<br>\n<b>sqft_above</b> - The square footage of the interior housing space that is above ground level<br>\n<b>sqft_basement</b> - The square footage of the interior housing space that is below ground level<br>\n<b>yr_built</b> - The year the house was initially built<br>\n<b>yr_renovated</b> - The year of the house\u2019s last renovation<br>\n<b>zipcode</b> - What zipcode area the house is in<br>\n<b>lat</b> - Lattitude<br>\n<b>long</b> - Longitude<br>\n<b>sqft_living15</b> - The square footage of interior housing living space for the nearest 15 neighbors<br>\n<b>sqft_lot15</b> - The square footage of the land lots of the nearest 15 neighbors<br>\n\nverified from 2 sources:<br>\nhttps://www.slideshare.net/PawanShivhare1/predicting-king-county-house-prices<br>\nhttps://rstudio-pubs-static.s3.amazonaws.com/155304_cc51f448116744069664b35e7762999f.htm<br>\n <p>", "metadata": {}}, {"cell_type": "code", "source": "df.head()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.601019Z", "iopub.execute_input": "2021-07-26T08:38:43.601668Z", "iopub.status.idle": "2021-07-26T08:38:43.630082Z", "shell.execute_reply.started": "2021-07-26T08:38:43.601621Z", "shell.execute_reply": "2021-07-26T08:38:43.628991Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Scatter 2 numerical columns", "metadata": {}}, {"cell_type": "code", "source": "def plot_2_features(df, x_name, y_name):\n plt.figure(figsize=(12,8))\n plt.scatter(df[x_name], df[y_name], s=2)\n plt.xlabel(x_name)\n plt.ylabel(y_name)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.632605Z", "iopub.execute_input": "2021-07-26T08:38:43.632981Z", "iopub.status.idle": "2021-07-26T08:38:43.639243Z", "shell.execute_reply.started": "2021-07-26T08:38:43.632944Z", "shell.execute_reply": "2021-07-26T08:38:43.637753Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Plot map with a numerical column", "metadata": {}}, {"cell_type": "code", "source": "def plot_map_num(df, y_name, interquartile=True, v=None):\n plt.figure(figsize=(20,10))\n if v != None:\n vmin = v[0]\n vmax = v[1]\n points = plt.scatter(df['long'], df['lat'], 
c=df[y_name], cmap='jet', lw=0, s=2, vmin=vmin, vmax=vmax)\n elif interquartile:\n desc_df = df.describe()\n vmin = desc_df.loc['25%', y_name]\n vmax = desc_df.loc['75%', y_name]\n points = plt.scatter(df['long'], df['lat'], c=df[y_name], cmap='jet', lw=0, s=2, vmin=vmin, vmax=vmax)\n else:\n points = plt.scatter(df['long'], df['lat'], c=df[y_name], cmap='jet', lw=0, s=2)\n plt.colorbar(points)\n plt.xlabel('Long')\n plt.ylabel('Lat')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.644521Z", "iopub.execute_input": "2021-07-26T08:38:43.645044Z", "iopub.status.idle": "2021-07-26T08:38:43.654418Z", "shell.execute_reply.started": "2021-07-26T08:38:43.645011Z", "shell.execute_reply": "2021-07-26T08:38:43.653450Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Plot price map", "metadata": {}}, {"cell_type": "code", "source": "plot_map_num(df, 'price', interquartile=True)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:43.662330Z", "iopub.execute_input": "2021-07-26T08:38:43.662904Z", "iopub.status.idle": "2021-07-26T08:38:44.269215Z", "shell.execute_reply.started": "2021-07-26T08:38:43.662871Z", "shell.execute_reply": "2021-07-26T08:38:44.268538Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Load dataset containing Neighborhoods with GPS coord\n\nSource: https://www.kaggle.com/city-of-seattle/sea-building-energy-benchmarking#2015-building-energy-benchmarking.csv\n\nNote: I loaded a cleaned version of the dataset that I made for a data-science online training. ", "metadata": {}}, {"cell_type": "code", "source": "neighborhood_data = pd.read_csv('../input/sea-energy-building-benchmark/data_cleaned.csv')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.270377Z", "iopub.execute_input": "2021-07-26T08:38:44.270806Z", "iopub.status.idle": "2021-07-26T08:38:44.306895Z", "shell.execute_reply.started": "2021-07-26T08:38:44.270776Z", "shell.execute_reply": "2021-07-26T08:38:44.305760Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "Selecting only the intersting columns", "metadata": {}}, {"cell_type": "code", "source": "neighborhood_df = neighborhood_data.copy()\nneighborhood_df = neighborhood_df[['Latitude', 'Longitude', 'Neighborhood']]", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.308815Z", "iopub.execute_input": "2021-07-26T08:38:44.309102Z", "iopub.status.idle": "2021-07-26T08:38:44.316554Z", "shell.execute_reply.started": "2021-07-26T08:38:44.309075Z", "shell.execute_reply": "2021-07-26T08:38:44.315454Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "neighborhood_df.head()", "metadata": {"scrolled": true, "execution": {"iopub.status.busy": "2021-07-26T08:38:44.318357Z", "iopub.execute_input": "2021-07-26T08:38:44.318992Z", "iopub.status.idle": "2021-07-26T08:38:44.335443Z", "shell.execute_reply.started": "2021-07-26T08:38:44.318953Z", "shell.execute_reply": "2021-07-26T08:38:44.334441Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "neighborhood_df['Neighborhood'].unique()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.336943Z", "iopub.execute_input": "2021-07-26T08:38:44.337273Z", "iopub.status.idle": "2021-07-26T08:38:44.350142Z", "shell.execute_reply.started": "2021-07-26T08:38:44.337238Z", "shell.execute_reply": 
"2021-07-26T08:38:44.348683Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Importing KNN, MinMaxScaler", "metadata": {}}, {"cell_type": "code", "source": "from sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import confusion_matrix, classification_report", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.352255Z", "iopub.execute_input": "2021-07-26T08:38:44.352972Z", "iopub.status.idle": "2021-07-26T08:38:44.361170Z", "shell.execute_reply.started": "2021-07-26T08:38:44.352920Z", "shell.execute_reply": "2021-07-26T08:38:44.360284Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "X = neighborhood_df.drop('Neighborhood', axis=1).values\ny = neighborhood_df['Neighborhood'].values", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.362804Z", "iopub.execute_input": "2021-07-26T08:38:44.363533Z", "iopub.status.idle": "2021-07-26T08:38:44.373530Z", "shell.execute_reply.started": "2021-07-26T08:38:44.363489Z", "shell.execute_reply": "2021-07-26T08:38:44.372636Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "Splitting Data", "metadata": {}}, {"cell_type": "code", "source": "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.377437Z", "iopub.execute_input": "2021-07-26T08:38:44.378051Z", "iopub.status.idle": "2021-07-26T08:38:44.385717Z", "shell.execute_reply.started": "2021-07-26T08:38:44.378003Z", "shell.execute_reply": "2021-07-26T08:38:44.384375Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "Made my own encoding class which is easy to use because I got some errors with LabelEncoder ", "metadata": {}}, {"cell_type": "code", "source": "class Encoding:\n \n def __init__(self):\n self.dico = {}\n self.inv_dico = {}\n \n def fit(self, y):\n i=0\n for classe in pd.Series(y).unique():\n self.dico[classe] = i\n self.inv_dico[i] = classe\n i+=1\n \n def transform(self, y):\n return pd.Series(y).map(self.dico).values\n \n def inverse_transform(self, y):\n return pd.Series(y).map(self.inv_dico).values", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.388366Z", "iopub.execute_input": "2021-07-26T08:38:44.388809Z", "iopub.status.idle": "2021-07-26T08:38:44.400646Z", "shell.execute_reply.started": "2021-07-26T08:38:44.388762Z", "shell.execute_reply": "2021-07-26T08:38:44.399469Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Using Neighborhoods datasets to train a model for predicting Neighborhood in df", "metadata": {}}, {"cell_type": "code", "source": "scaler = MinMaxScaler()\nX_train_scaled = scaler.fit_transform(X_train)\nX_test_scaled = scaler.transform(X_test)\n\nencoder = Encoding()\nencoder.fit(y_train)\ny_train_coded = encoder.transform(y_train)\ny_test_coded = encoder.transform(y_test)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.402387Z", "iopub.execute_input": "2021-07-26T08:38:44.403049Z", "iopub.status.idle": "2021-07-26T08:38:44.417277Z", "shell.execute_reply.started": "2021-07-26T08:38:44.403002Z", "shell.execute_reply": "2021-07-26T08:38:44.416043Z"}, "trusted": true}, "execution_count": 
null, "outputs": []}, {"cell_type": "markdown", "source": "KNeighborsClassifier with minimum optimization (maybe need more parameter or an other algorithm).<br> <b>Can be improved.", "metadata": {}}, {"cell_type": "code", "source": "model = GridSearchCV(KNeighborsClassifier(), {'n_neighbors':range(1,11)})", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.418538Z", "iopub.execute_input": "2021-07-26T08:38:44.418918Z", "iopub.status.idle": "2021-07-26T08:38:44.427323Z", "shell.execute_reply.started": "2021-07-26T08:38:44.418883Z", "shell.execute_reply": "2021-07-26T08:38:44.426470Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "<b>Fitting with training set ", "metadata": {}}, {"cell_type": "code", "source": "model.fit(X_train_scaled, y_train_coded)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:44.430077Z", "iopub.execute_input": "2021-07-26T08:38:44.431629Z", "iopub.status.idle": "2021-07-26T08:38:46.412295Z", "shell.execute_reply.started": "2021-07-26T08:38:44.431577Z", "shell.execute_reply": "2021-07-26T08:38:46.411084Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "<b>Predicting results on the test set", "metadata": {}}, {"cell_type": "code", "source": "y_pred = encoder.inverse_transform(model.predict(X_test_scaled))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:46.414167Z", "iopub.execute_input": "2021-07-26T08:38:46.414612Z", "iopub.status.idle": "2021-07-26T08:38:46.467816Z", "shell.execute_reply.started": "2021-07-26T08:38:46.414566Z", "shell.execute_reply": "2021-07-26T08:38:46.466579Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "<b>Score on the test set", "metadata": {}}, {"cell_type": "code", "source": "model.score(X_test_scaled, y_test_coded)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:46.469165Z", "iopub.execute_input": "2021-07-26T08:38:46.469514Z", "iopub.status.idle": "2021-07-26T08:38:46.522693Z", "shell.execute_reply.started": "2021-07-26T08:38:46.469481Z", "shell.execute_reply": "2021-07-26T08:38:46.521458Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Confusion Matrix", "metadata": {}}, {"cell_type": "code", "source": "plt.figure(figsize=(12,8))\nsns.heatmap(confusion_matrix(y_test, y_pred), annot=True)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:46.524373Z", "iopub.execute_input": "2021-07-26T08:38:46.524807Z", "iopub.status.idle": "2021-07-26T08:38:47.656638Z", "shell.execute_reply.started": "2021-07-26T08:38:46.524758Z", "shell.execute_reply": "2021-07-26T08:38:47.655172Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Classification report", "metadata": {}}, {"cell_type": "code", "source": "print(classification_report(y_test, y_pred))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:47.658534Z", "iopub.execute_input": "2021-07-26T08:38:47.658990Z", "iopub.status.idle": "2021-07-26T08:38:47.726191Z", "shell.execute_reply.started": "2021-07-26T08:38:47.658944Z", "shell.execute_reply": "2021-07-26T08:38:47.724940Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "<b>Adding a new column Neighborhood for King County Houses", "metadata": {}}, {"cell_type": "code", "source": "df['Neighborhood'] = 
encoder.inverse_transform(model.predict(scaler.transform(df[['lat', 'long']].values)))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:47.727645Z", "iopub.execute_input": "2021-07-26T08:38:47.727950Z", "iopub.status.idle": "2021-07-26T08:38:48.488053Z", "shell.execute_reply.started": "2021-07-26T08:38:47.727922Z", "shell.execute_reply": "2021-07-26T08:38:48.486821Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Plot map with a categorical column", "metadata": {}}, {"cell_type": "code", "source": "def plot_map_categ(df, categ_column):\n plt.figure(figsize=(20,10))\n for classe in df[categ_column].sort_values().unique():\n df_classe = df[df[categ_column]==classe]\n plt.scatter(df_classe['long'], df_classe['lat'], lw=0, s=10, label=classe)\n plt.legend()\n plt.xlabel('Long')\n plt.ylabel('Lat')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:48.490022Z", "iopub.execute_input": "2021-07-26T08:38:48.490535Z", "iopub.status.idle": "2021-07-26T08:38:48.498500Z", "shell.execute_reply.started": "2021-07-26T08:38:48.490484Z", "shell.execute_reply": "2021-07-26T08:38:48.496989Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Neighborhood locations\n<b>Note:</b> The Neighborhood dataset was covering a smaller area for the longitude\n. So the mountain part may not be very accurate. ", "metadata": {}}, {"cell_type": "code", "source": "plot_map_categ(df, 'Neighborhood')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:48.500316Z", "iopub.execute_input": "2021-07-26T08:38:48.500826Z", "iopub.status.idle": "2021-07-26T08:38:50.134310Z", "shell.execute_reply.started": "2021-07-26T08:38:48.500767Z", "shell.execute_reply": "2021-07-26T08:38:50.133234Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Boxplot function", "metadata": {}}, {"cell_type": "code", "source": "def boxplot_groupes(df, categ_column, target_column, figsize=(20,10)):\n groupes = []\n for cat in list(df[categ_column].unique()):\n groupes.append(df[df[categ_column]==cat][target_column])\n\n medianprops = {'color':\"black\"}\n meanprops = {'marker':'o', 'markeredgecolor':'black',\n 'markerfacecolor':'firebrick'}\n\n plt.figure(figsize=figsize)\n plt.boxplot(groupes, labels=list(df[categ_column].unique()), showfliers=False, medianprops=medianprops, \n vert=False, patch_artist=True, showmeans=True, meanprops=meanprops)\n plt.ylabel(categ_column)\n plt.xlabel(target_column)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:50.135779Z", "iopub.execute_input": "2021-07-26T08:38:50.136106Z", "iopub.status.idle": "2021-07-26T08:38:50.144559Z", "shell.execute_reply.started": "2021-07-26T08:38:50.136065Z", "shell.execute_reply": "2021-07-26T08:38:50.143498Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "<b>Boxplot Neighborhood / price", "metadata": {}}, {"cell_type": "code", "source": "boxplot_groupes(df, 'Neighborhood', 'price')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:50.146381Z", "iopub.execute_input": "2021-07-26T08:38:50.146741Z", "iopub.status.idle": "2021-07-26T08:38:50.640980Z", "shell.execute_reply.started": "2021-07-26T08:38:50.146703Z", "shell.execute_reply": "2021-07-26T08:38:50.639869Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "### Updated King County house prices 
dataSet with a 'Neighborhood' column", "metadata": {}}, {"cell_type": "code", "source": "df.head()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:38:50.642411Z", "iopub.execute_input": "2021-07-26T08:38:50.642732Z", "iopub.status.idle": "2021-07-26T08:38:50.669237Z", "shell.execute_reply.started": "2021-07-26T08:38:50.642699Z", "shell.execute_reply": "2021-07-26T08:38:50.667505Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "markdown", "source": "## Conclusion:\n<b>We can see some significative changes in terme of prices between neighborhoods. The model for predicting the neighborhood can be improved.", "metadata": {}}, {"cell_type": "code", "source": "", "metadata": {}, "execution_count": null, "outputs": []}] | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046416.ipynb | housesalesprediction | harlfoxem | [{"Id": 69046416, "ScriptId": 18825679, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7571614, "CreationDate": "07/26/2021 08:39:57", "VersionNumber": 4.0, "Title": "King County Houses Neighborhood Classification", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 269.0, "LinesInsertedFromPrevious": 10.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 259.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}] | [{"Id": 91775956, "KernelVersionId": 69046416, "SourceDatasetVersionId": 270}] | [{"Id": 270, "DatasetId": 128, "DatasourceVersionId": 270, "CreatorUserId": 680332, "LicenseName": "CC0: Public Domain", "CreationDate": "08/25/2016 15:52:49", "VersionNumber": 1.0, "Title": "House Sales in King County, USA", "Slug": "housesalesprediction", "Subtitle": "Predict house price using regression", "Description": "This dataset contains house sale prices for King County, which includes Seattle. It includes homes sold between May 2014 and May 2015.\n\nIt's a great dataset for evaluating simple regression models.", "VersionNotes": "Initial release", "TotalCompressedBytes": 2515206.0, "TotalUncompressedBytes": 2515206.0}] | [{"Id": 128, "CreatorUserId": 680332, "OwnerUserId": 680332.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 270.0, "CurrentDatasourceVersionId": 270.0, "ForumId": 1447, "Type": 2, "CreationDate": "08/25/2016 15:52:49", "LastActivityDate": "02/06/2018", "TotalViews": 996866, "TotalDownloads": 172516, "TotalVotes": 2041, "TotalKernels": 1225}] | [{"Id": 680332, "UserName": "harlfoxem", "DisplayName": "harlfoxem", "RegisterDate": "08/05/2016", "PerformanceTier": 1}] | # # King County Houses Prices:
# ## Neigborhoods Classification
# In this notebook, I used an other dataset (SEA Building Energy Benchmarking (Source bellow)) which give us for each building GPS coords and the neighborhood (North, East, Ballard, Delridge, etc) .
# I cleaned the dataset as part of a project for a data scientist training and got the idea using this to classify each King County Houses using a KNN classifier.
#
# It will maybe help improving algorithm performances for predicting house prices.
#
# Results at the bottom of the notebook
# ### Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
sns.set()
data = pd.read_csv("../input/housesalesprediction/kc_house_data.csv")
# ### Exploratory Functions
def describe_columns(df):
desc_df = pd.DataFrame(
index=df.columns,
columns=["NaN count", "NaN frequency (%)", "Number of unique values"],
)
desc_df["NaN count"] = df.isna().sum()
desc_df["NaN frequency (%)"] = desc_df["NaN count"] / df.shape[0] * 100
for column in df.columns:
desc_df["Number of unique values"][column] = len(df[column].dropna().unique())
return desc_df
def move_column(df, column_name, column_place):
mvd_column = df.pop(column_name)
df.insert(column_place, column_name, mvd_column)
return df
def prop_nan(df):
return (df.isna()).sum().sum() / df.size
def nan_map(df, save=False, filename="nan_location"):
plt.figure(figsize=(20, 10))
sns.heatmap(df.isna())
if save:
plt.savefig(filename)
def corr_matrix(
df,
figsize=(30, 20),
maptype="heatmap",
absolute=False,
crit_value=None,
annot=True,
save=False,
filename="corr_matrix",
):
matrix_corr = df.corr()
if absolute:
matrix_corr = matrix_corr.abs()
    if crit_value is not None:
matrix_corr = matrix_corr >= crit_value
plt.figure(figsize=figsize)
if maptype == "heatmap":
sns.heatmap(matrix_corr, annot=annot)
elif maptype == "clustermap":
sns.clustermap(matrix_corr, annot=annot)
if save:
plt.savefig(filename)
df = data.copy()
# ### Columns descriptions
# id - Unique ID for each home sold
# date - Date of the home sale
# price - Price of each home sold
# bedrooms - Number of bedrooms
# bathrooms - Number of bathrooms, where .5 accounts for a room with a toilet but no shower
# sqft_living - Square footage of the apartment's interior living space
# sqft_lot - Square footage of the land space
# floors - Number of floors
# waterfront - A dummy variable for whether the apartment was overlooking the waterfront or not
# view - An index from 0 to 4 of how good the view of the property was
# condition - An index from 1 to 5 on the condition of the apartment
# grade - An index from 1 to 13, where 1-3 falls short of building construction and design, 7 has an average level of construction and design, and 11-13 have a high quality level of construction and design.
# sqft_above - The square footage of the interior housing space that is above ground level
# sqft_basement - The square footage of the interior housing space that is below ground level
# yr_built - The year the house was initially built
# yr_renovated - The year of the house’s last renovation
# zipcode - What zipcode area the house is in
# lat - Latitude
# long - Longitude
# sqft_living15 - The square footage of interior housing living space for the nearest 15 neighbors
# sqft_lot15 - The square footage of the land lots of the nearest 15 neighbors
# verified from 2 sources:
# https://www.slideshare.net/PawanShivhare1/predicting-king-county-house-prices
# https://rstudio-pubs-static.s3.amazonaws.com/155304_cc51f448116744069664b35e7762999f.htm
#
df.head()
# ### Scatter 2 numerical columns
def plot_2_features(df, x_name, y_name):
plt.figure(figsize=(12, 8))
plt.scatter(df[x_name], df[y_name], s=2)
plt.xlabel(x_name)
plt.ylabel(y_name)
# ### Plot map with a numerical column
def plot_map_num(df, y_name, interquartile=True, v=None):
plt.figure(figsize=(20, 10))
    if v is not None:
vmin = v[0]
vmax = v[1]
points = plt.scatter(
df["long"],
df["lat"],
c=df[y_name],
cmap="jet",
lw=0,
s=2,
vmin=vmin,
vmax=vmax,
)
elif interquartile:
desc_df = df.describe()
vmin = desc_df.loc["25%", y_name]
vmax = desc_df.loc["75%", y_name]
points = plt.scatter(
df["long"],
df["lat"],
c=df[y_name],
cmap="jet",
lw=0,
s=2,
vmin=vmin,
vmax=vmax,
)
else:
points = plt.scatter(df["long"], df["lat"], c=df[y_name], cmap="jet", lw=0, s=2)
plt.colorbar(points)
plt.xlabel("Long")
plt.ylabel("Lat")
# ### Plot price map
plot_map_num(df, "price", interquartile=True)
# ### Load dataset containing Neighborhoods with GPS coord
# Source: https://www.kaggle.com/city-of-seattle/sea-building-energy-benchmarking#2015-building-energy-benchmarking.csv
# Note: I loaded a cleaned version of the dataset that I made for a data-science online training.
neighborhood_data = pd.read_csv(
"../input/sea-energy-building-benchmark/data_cleaned.csv"
)
# Selecting only the interesting columns
neighborhood_df = neighborhood_data.copy()
neighborhood_df = neighborhood_df[["Latitude", "Longitude", "Neighborhood"]]
neighborhood_df.head()
neighborhood_df["Neighborhood"].unique()
# ### Importing KNN, MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
X = neighborhood_df.drop("Neighborhood", axis=1).values
y = neighborhood_df["Neighborhood"].values
# Splitting Data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# I wrote my own small encoding class, which is easy to use, because I ran into errors with LabelEncoder.
class Encoding:
def __init__(self):
self.dico = {}
self.inv_dico = {}
def fit(self, y):
i = 0
for classe in pd.Series(y).unique():
self.dico[classe] = i
self.inv_dico[i] = classe
i += 1
def transform(self, y):
return pd.Series(y).map(self.dico).values
def inverse_transform(self, y):
return pd.Series(y).map(self.inv_dico).values
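# Quick added check (hypothetical labels, not part of the original notebook):
# the class assigns integer codes in order of first appearance and inverts
# them exactly.
_enc_demo = Encoding()
_enc_demo.fit(["North", "East", "North"])
assert list(_enc_demo.transform(["East", "North"])) == [1, 0]
assert list(_enc_demo.inverse_transform([0, 1])) == ["North", "East"]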
# ### Using Neighborhoods datasets to train a model for predicting Neighborhood in df
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
encoder = Encoding()
encoder.fit(y_train)
y_train_coded = encoder.transform(y_train)
y_test_coded = encoder.transform(y_test)
# KNeighborsClassifier with minimal tuning (it may need more parameters or another algorithm). Can be improved; see the commented sketch below.
model = GridSearchCV(KNeighborsClassifier(), {"n_neighbors": range(1, 11)})
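# As noted above, this search is minimal. A wider grid could also tune the
# neighbor weighting and the Minkowski power parameter; this is a hypothetical
# sketch, not run here:
# model = GridSearchCV(
#     KNeighborsClassifier(),
#     {"n_neighbors": range(1, 21), "weights": ["uniform", "distance"], "p": [1, 2]},
# )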
# Fitting with training set
model.fit(X_train_scaled, y_train_coded)
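# Added sketch: after fitting, GridSearchCV exposes the winning configuration.
print("Best parameters found:", model.best_params_)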
# Predicting results on the test set
y_pred = encoder.inverse_transform(model.predict(X_test_scaled))
# Score on the test set
model.score(X_test_scaled, y_test_coded)
# ### Confusion Matrix
plt.figure(figsize=(12, 8))
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
# ### Classification report
print(classification_report(y_test, y_pred))
# Adding a new column Neighborhood for King County Houses
df["Neighborhood"] = encoder.inverse_transform(
model.predict(scaler.transform(df[["lat", "long"]].values))
)
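# Added sanity check: distribution of the predicted neighborhoods.
print(df["Neighborhood"].value_counts())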
# ### Plot map with a categorical column
def plot_map_categ(df, categ_column):
plt.figure(figsize=(20, 10))
for classe in df[categ_column].sort_values().unique():
df_classe = df[df[categ_column] == classe]
plt.scatter(df_classe["long"], df_classe["lat"], lw=0, s=10, label=classe)
plt.legend()
plt.xlabel("Long")
plt.ylabel("Lat")
# ### Neighborhood locations
# Note: The Neighborhood dataset covers a smaller longitude range than the house data, so the eastern (mountain) part may not be very accurate; the added sketch below measures how many houses fall outside that range.
plot_map_categ(df, "Neighborhood")
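# Added sketch for the note above: share of houses outside the longitude range
# covered by the neighborhood training data, where predictions are least reliable.
lon_min, lon_max = neighborhood_df["Longitude"].min(), neighborhood_df["Longitude"].max()
print(
    "Share of houses outside the training longitude range:",
    (~df["long"].between(lon_min, lon_max)).mean(),
)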
# ### Boxplot function
def boxplot_groupes(df, categ_column, target_column, figsize=(20, 10)):
groupes = []
for cat in list(df[categ_column].unique()):
groupes.append(df[df[categ_column] == cat][target_column])
medianprops = {"color": "black"}
meanprops = {
"marker": "o",
"markeredgecolor": "black",
"markerfacecolor": "firebrick",
}
plt.figure(figsize=figsize)
plt.boxplot(
groupes,
labels=list(df[categ_column].unique()),
showfliers=False,
medianprops=medianprops,
vert=False,
patch_artist=True,
showmeans=True,
meanprops=meanprops,
)
plt.ylabel(categ_column)
plt.xlabel(target_column)
# Boxplot Neighborhood / price
boxplot_groupes(df, "Neighborhood", "price")
# ### Updated King County house prices dataset with a 'Neighborhood' column
df.head()
|
69046500 | [{"cell_type": "code", "source": "import numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.multioutput import MultiOutputRegressor\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:55.905109Z", "iopub.execute_input": "2021-07-26T06:22:55.905515Z", "iopub.status.idle": "2021-07-26T06:22:56.997939Z", "shell.execute_reply.started": "2021-07-26T06:22:55.905403Z", "shell.execute_reply": "2021-07-26T06:22:56.996999Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train = pd.read_csv('/kaggle/input/tabular-playground-series-jul-2021/train.csv')\ntest = pd.read_csv('/kaggle/input/tabular-playground-series-jul-2021/test.csv')\nsub = pd.read_csv('/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv')", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:56.999435Z", "iopub.execute_input": "2021-07-26T06:22:56.999773Z", "iopub.status.idle": "2021-07-26T06:22:57.058685Z", "shell.execute_reply.started": "2021-07-26T06:22:56.999737Z", "shell.execute_reply": "2021-07-26T06:22:57.057722Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train = train.set_index(\"date_time\").copy()\ntest = test.set_index(\"date_time\").copy()", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:57.062200Z", "iopub.execute_input": "2021-07-26T06:22:57.062464Z", "iopub.status.idle": "2021-07-26T06:22:57.080274Z", "shell.execute_reply.started": "2021-07-26T06:22:57.062428Z", "shell.execute_reply": "2021-07-26T06:22:57.079515Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "target_cols = [col for col in train.columns if col.startswith('target')]\nfeat_cols = [col for col in train.columns if col not in target_cols]", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:57.083298Z", "iopub.execute_input": "2021-07-26T06:22:57.083670Z", "iopub.status.idle": "2021-07-26T06:22:57.090197Z", "shell.execute_reply.started": "2021-07-26T06:22:57.083637Z", "shell.execute_reply": "2021-07-26T06:22:57.089348Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "train, val = train_test_split(train, test_size=0.2, random_state=42)\nfea_scaler = MinMaxScaler()\nlab_scaler = MinMaxScaler()\n\nXtrain_scaled = fea_scaler.fit_transform(train.drop(target_cols[:],axis=1))\nXval_scaled = fea_scaler.transform(val.drop(target_cols[:],axis=1))\nYtrain_scaled =lab_scaler.fit_transform(train[target_cols[:]])\nYval_scaled =lab_scaler.transform(val[target_cols[:]])\nXtest_scaled = fea_scaler.transform(test)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:22:57.093728Z", "iopub.execute_input": "2021-07-26T06:22:57.094039Z", "iopub.status.idle": "2021-07-26T06:22:57.122282Z", "shell.execute_reply.started": "2021-07-26T06:22:57.094004Z", "shell.execute_reply": "2021-07-26T06:22:57.121530Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 
'reg_lambda': 0.1}\nmodel = xgb.XGBRegressor(**other_params)\nmultioutputregressor = MultiOutputRegressor(xgb.XGBRegressor(objective='reg:squarederror',**other_params)).fit(Xtrain_scaled, Ytrain_scaled)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:35:58.783410Z", "iopub.execute_input": "2021-07-26T08:35:58.783744Z", "iopub.status.idle": "2021-07-26T08:36:03.669588Z", "shell.execute_reply.started": "2021-07-26T08:35:58.783713Z", "shell.execute_reply": "2021-07-26T08:36:03.668683Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'n_estimators': [400, 500, 600, 700, 800]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_ \n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T06:30:28.413953Z", "iopub.execute_input": "2021-07-26T06:30:28.414280Z", "iopub.status.idle": "2021-07-26T06:36:38.837216Z", "shell.execute_reply.started": "2021-07-26T06:30:28.414251Z", "shell.execute_reply": "2021-07-26T06:36:38.836204Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'max_depth': [3, 4, 5, 6, 7, 8, 9, 10]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytr ee': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_ \n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:50:48.265828Z", "iopub.execute_input": "2021-07-26T07:50:48.266183Z", "iopub.status.idle": "2021-07-26T08:00:43.221204Z", "shell.execute_reply.started": "2021-07-26T07:50:48.266150Z", "shell.execute_reply": "2021-07-26T08:00:43.220325Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'min_child_weight': [1, 2, 3, 4, 5, 6]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytr ee': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_ \n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# 
print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:06:26.434258Z", "iopub.execute_input": "2021-07-26T08:06:26.434607Z", "iopub.status.idle": "2021-07-26T08:12:39.517724Z", "shell.execute_reply.started": "2021-07-26T08:06:26.434573Z", "shell.execute_reply": "2021-07-26T08:12:39.516614Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_\n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T07:34:35.386076Z", "iopub.execute_input": "2021-07-26T07:34:35.386394Z", "iopub.status.idle": "2021-07-26T07:40:45.010251Z", "shell.execute_reply.started": "2021-07-26T07:34:35.386365Z", "shell.execute_reply": "2021-07-26T07:40:45.009245Z"}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'subsample': [0.6, 0.7, 0.8, 0.9], 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# #evalute_result = optimized_GBM.grid_scores_\n# #print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\nprint('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:13:25.544492Z", "iopub.execute_input": "2021-07-26T08:13:25.544821Z", "iopub.status.idle": "2021-07-26T08:28:44.136295Z", "shell.execute_reply.started": "2021-07-26T08:13:25.544790Z", "shell.execute_reply": "2021-07-26T08:28:44.133547Z"}, "collapsed": true, "jupyter": {"outputs_hidden": true}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# #evalute_result = optimized_GBM.grid_scores_\n# 
#print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "# cv_params = {'n_estimators': [400, 500, 600, 700, 800], \n# 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10], \n# 'min_child_weight': [1, 2, 3, 4, 5, 6], \n# 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6], \n# 'subsample': [0.6, 0.7, 0.8, 0.9], \n# 'colsample_bytree': [0.6, 0.7, 0.8, 0.9], \n# 'reg_alpha': [0.05, 0.1, 1, 2, 3], \n# 'reg_lambda': [0.05, 0.1, 1, 2, 3], \n# 'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}\n# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,\n# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 0.1}\n# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=10)\n# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])\n# evalute_result = optimized_GBM.cv_results_\n# print('\u6bcf\u8f6e\u8fed\u4ee3\u8fd0\u884c\u7ed3\u679c:{0}'.format(evalute_result))\n# print('\u53c2\u6570\u7684\u6700\u4f73\u53d6\u503c\uff1a{0}'.format(optimized_GBM.best_params_))\n# print('\u6700\u4f73\u6a21\u578b\u5f97\u5206:{0}'.format(optimized_GBM.best_score_))", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:31:59.643770Z", "iopub.execute_input": "2021-07-26T08:31:59.644124Z", "iopub.status.idle": "2021-07-26T08:33:50.113972Z", "shell.execute_reply.started": "2021-07-26T08:31:59.644093Z", "shell.execute_reply": "2021-07-26T08:33:50.110025Z"}, "collapsed": true, "jupyter": {"outputs_hidden": true}, "trusted": true}, "execution_count": null, "outputs": []}, {"cell_type": "code", "source": "pred = multioutputregressor.predict(Xtest_scaled)\npred = lab_scaler.inverse_transform(pred)\npred = pred.reshape(2247, 3)\nsub[target_cols[:]] = pred\nsub.to_csv('sample_submission.csv', index=0)", "metadata": {"execution": {"iopub.status.busy": "2021-07-26T08:39:22.229237Z", "iopub.execute_input": "2021-07-26T08:39:22.229569Z", "iopub.status.idle": "2021-07-26T08:39:22.274571Z", "shell.execute_reply.started": "2021-07-26T08:39:22.229537Z", "shell.execute_reply": "2021-07-26T08:39:22.273824Z"}, "trusted": true}, "execution_count": null, "outputs": []}] | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046500.ipynb | null | null | [{"Id": 69046500, "ScriptId": 18816238, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7392108, "CreationDate": "07/26/2021 08:41:26", "VersionNumber": 2.0, "Title": "XGBoost", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 76.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 44.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}] | null | null | null | null | import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputRegressor
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv")
test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv")
sub = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv"
)
train = train.set_index("date_time").copy()
test = test.set_index("date_time").copy()
target_cols = [col for col in train.columns if col.startswith("target")]
feat_cols = [col for col in train.columns if col not in target_cols]
train, val = train_test_split(train, test_size=0.2, random_state=42)
fea_scaler = MinMaxScaler()
lab_scaler = MinMaxScaler()
Xtrain_scaled = fea_scaler.fit_transform(train.drop(target_cols[:], axis=1))
Xval_scaled = fea_scaler.transform(val.drop(target_cols[:], axis=1))
Ytrain_scaled = lab_scaler.fit_transform(train[target_cols[:]])
Yval_scaled = lab_scaler.transform(val[target_cols[:]])
Xtest_scaled = fea_scaler.transform(test)
other_params = {
"learning_rate": 0.1,
"n_estimators": 400,
"max_depth": 4,
"min_child_weight": 5,
"seed": 0,
"subsample": 0.8,
"colsample_bytree": 0.8,
"gamma": 0.1,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
}
model = xgb.XGBRegressor(**other_params)
multioutputregressor = MultiOutputRegressor(
xgb.XGBRegressor(objective="reg:squarederror", **other_params)
).fit(Xtrain_scaled, Ytrain_scaled)
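# Added sketch (not in the original notebook): score the fitted multi-output
# model on the held-out validation split, one R^2 value per target.
from sklearn.metrics import r2_score

val_pred = multioutputregressor.predict(Xval_scaled)
print("Validation R^2 per target:", r2_score(Yval_scaled, val_pred, multioutput="raw_values"))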
# cv_params = {'n_estimators': [400, 500, 600, 700, 800]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'max_depth': [3, 4, 5, 6, 7, 8, 9, 10]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'min_child_weight': [1, 2, 3, 4, 5, 6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'subsample': [0.6, 0.7, 0.8, 0.9], 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# #evalute_result = optimized_GBM.grid_scores_
# #print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))  # commented out: optimized_GBM is undefined while the grid search above is disabled
# cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# #evalute_result = optimized_GBM.grid_scores_
# #print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'n_estimators': [400, 500, 600, 700, 800],
# 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10],
# 'min_child_weight': [1, 2, 3, 4, 5, 6],
# 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
# 'subsample': [0.6, 0.7, 0.8, 0.9],
# 'colsample_bytree': [0.6, 0.7, 0.8, 0.9],
# 'reg_alpha': [0.05, 0.1, 1, 2, 3],
# 'reg_lambda': [0.05, 0.1, 1, 2, 3],
# 'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 0.1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=10)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each CV round: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# Predict on the scaled test set, undo the target scaling, and write the submission.
pred = multioutputregressor.predict(Xtest_scaled)
pred = lab_scaler.inverse_transform(pred)
pred = pred.reshape(2247, 3)  # 2247 test rows, 3 target columns
sub[target_cols[:]] = pred
sub.to_csv("sample_submission.csv", index=False)
|
69046748 | "[{\"cell_type\": \"code\", \"source\": \"!pip install -q py-readability-metrics\", \"metadata\": {\(...TRUNCATED) | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046748.ipynb | commonlit-various | markwijkhuizen | "[{\"Id\": 69046748, \"ScriptId\": 17247939, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9(...TRUNCATED) | "[{\"Id\": 91776584, \"KernelVersionId\": 69046748, \"SourceDatasetVersionId\": 2462427}, {\"Id\": 9(...TRUNCATED) | "[{\"Id\": 2462427, \"DatasetId\": 1390791, \"DatasourceVersionId\": 2504854, \"CreatorUserId\": 443(...TRUNCATED) | "[{\"Id\": 1390791, \"CreatorUserId\": 4433335, \"OwnerUserId\": 4433335.0, \"OwnerOrganizationId\":(...TRUNCATED) | "[{\"Id\": 4433335, \"UserName\": \"markwijkhuizen\", \"DisplayName\": \"Mark Wijkhuizen\", \"Regist(...TRUNCATED) | "import warnings\n\nwarnings.simplefilter(\"ignore\")\nimport numpy as np\nimport pandas as pd\nimpo(...TRUNCATED) |
69046772 | "[{\"cell_type\": \"code\", \"source\": \"# This Python 3 environment comes with many helpful analyt(...TRUNCATED) | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046772.ipynb | null | null | "[{\"Id\": 69046772, \"ScriptId\": 18842152, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9(...TRUNCATED) | null | null | null | null | "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd(...TRUNCATED) |
69046835 | "[{\"cell_type\": \"code\", \"source\": \"# This Python 3 environment comes with many helpful analyt(...TRUNCATED) | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046835.ipynb | shark-attack-dataset | felipeesc | "[{\"Id\": 69046835, \"ScriptId\": 18842772, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9(...TRUNCATED) | [{"Id": 91776789, "KernelVersionId": 69046835, "SourceDatasetVersionId": 2462873}] | "[{\"Id\": 2462873, \"DatasetId\": 1490782, \"DatasourceVersionId\": 2505301, \"CreatorUserId\": 775(...TRUNCATED) | "[{\"Id\": 1490782, \"CreatorUserId\": 7756990, \"OwnerUserId\": 7756990.0, \"OwnerOrganizationId\":(...TRUNCATED) | "[{\"Id\": 7756990, \"UserName\": \"felipeesc\", \"DisplayName\": \"Felipe_Esc\", \"RegisterDate\": (...TRUNCATED) | "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd(...TRUNCATED) |
69046608 | "[{\"cell_type\": \"code\", \"source\": \"# This Python 3 environment comes with many helpful analyt(...TRUNCATED) | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046608.ipynb | null | null | "[{\"Id\": 69046608, \"ScriptId\": 18842199, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9(...TRUNCATED) | null | null | null | null | "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd(...TRUNCATED) |