path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
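For orientation, a minimal sketch of loading and filtering rows like the ones below with pandas; the cells.jsonl file name and the JSON-lines layout are assumptions for illustration, not part of this dump:

import pandas as pd

# Hypothetical export of this dump to JSON lines: one record per notebook
# cell, with the four columns described above.
cells = pd.read_json('cells.jsonl', lines=True)

# Keep only cells whose captured output included a rendered image.
with_images = cells[cells['screenshot_names'].apply(
    lambda names: any(n.startswith('image_output') for n in names))]
print(with_images[['path', 'cell_type']].head())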
2033003/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D

haberman = pd.read_csv('../input/haberman.csv')
plt.close()

# 3D scatter plot of the three numeric features
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = haberman['age']
y = haberman['operation_year']
z = haberman['axil_nodes']
ax.scatter(x, y, z, marker='o', c='r')
ax.set_xlabel('age')
ax.set_ylabel('operation_year')
ax.set_zlabel('axil_nodes')
plt.show()
code
2033003/cell_16
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D

haberman = pd.read_csv('../input/haberman.csv')
plt.close()

# 3D scattered plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = haberman['age']
y = haberman['operation_year']
z = haberman['axil_nodes']
ax.scatter(x, y, z, marker='o', c='r')
ax.set_xlabel('age')
ax.set_ylabel('operation_year')
ax.set_zlabel('axil_nodes')
plt.show()

# 1-D scatter: axil_nodes values along a single axis
plt.plot(haberman['axil_nodes'], np.zeros_like(haberman['axil_nodes']), 'o')
plt.show()
code
2033003/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

haberman = pd.read_csv('../input/haberman.csv')
print(haberman.shape)
code
2033003/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

haberman = pd.read_csv('../input/haberman.csv')
plt.close()
sns.pairplot(haberman, hue='status')
plt.show()
code
2033003/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

haberman = pd.read_csv('../input/haberman.csv')
haberman['status'].value_counts()
code
2033003/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

haberman = pd.read_csv('../input/haberman.csv')
haberman.plot(kind='scatter', x='age', y='axil_nodes')
plt.show()
code
2033003/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

haberman = pd.read_csv('../input/haberman.csv')
haberman.head(5)
code
72084578/cell_13
[ "text_plain_output_1.png" ]
import os
import random

import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_dir = '../input/yoga-poses-dataset/DATASET/TEST'

def plot_yoga_images(train_dir):
    # pick one random image per class directory
    for i, col in enumerate(os.listdir(train_dir)):
        image = random.choice(os.listdir(train_dir + '/' + col))
        image_path = train_dir + '/' + col + '/' + image
        img = mpimg.imread(image_path) / 255
        plt.axis(False)
        i = i + 1

IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
train_data = train_datagen.flow_from_directory(train_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')
test_data = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')

efficientnet_url = 'https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1'
resnet_url = 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5'

def create_model(model_url, num_classes=5):
    # frozen TF Hub feature extractor with a trainable softmax head
    feature_extractor_layer = hub.KerasLayer(model_url, trainable=False, name='feature_extraction_layer', input_shape=IMAGE_SHAPE + (3,))
    model = tf.keras.Sequential([feature_extractor_layer, layers.Dense(num_classes, activation='softmax', name='output_layer')])
    return model

def plot_loss(history):
    model_df = pd.DataFrame(history.history)
    loss = model_df.loss
    val_loss = model_df.val_loss
    accuracy = model_df.accuracy
    val_accuracy = model_df.val_accuracy
    epochs = range(len(model_df.loss))

resnet_model = create_model(resnet_url, num_classes=train_data.num_classes)
resnet_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
resnet_history = resnet_model.fit(train_data, epochs=5, steps_per_epoch=len(train_data), validation_data=test_data, validation_steps=len(test_data))
plot_loss(resnet_history)
code
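As dumped, plot_loss in the cell above computes the loss/accuracy series but never draws them; the plotting calls were evidently stripped. A minimal sketch, assuming matplotlib, of what such a helper typically does with those series:

import matplotlib.pyplot as plt
import pandas as pd

def plot_loss(history):
    # plot training vs. validation loss and accuracy over epochs
    model_df = pd.DataFrame(history.history)
    epochs = range(len(model_df.loss))
    plt.plot(epochs, model_df.loss, label='loss')
    plt.plot(epochs, model_df.val_loss, label='val_loss')
    plt.plot(epochs, model_df.accuracy, label='accuracy')
    plt.plot(epochs, model_df.val_accuracy, label='val_accuracy')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()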
72084578/cell_8
[ "image_output_2.png", "image_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_dir = '../input/yoga-poses-dataset/DATASET/TEST'

IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
print('Training Images')
train_data = train_datagen.flow_from_directory(train_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')
print('Testing Images')
test_data = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')
code
72084578/cell_15
[ "image_output_2.png", "image_output_1.png" ]
import os
import random

import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_dir = '../input/yoga-poses-dataset/DATASET/TEST'

def plot_yoga_images(train_dir):
    # pick one random image per class directory
    for i, col in enumerate(os.listdir(train_dir)):
        image = random.choice(os.listdir(train_dir + '/' + col))
        image_path = train_dir + '/' + col + '/' + image
        img = mpimg.imread(image_path) / 255
        plt.axis(False)
        i = i + 1

IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
train_data = train_datagen.flow_from_directory(train_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')
test_data = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')

efficientnet_url = 'https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1'
resnet_url = 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5'

def create_model(model_url, num_classes=5):
    # frozen TF Hub feature extractor with a trainable softmax head
    feature_extractor_layer = hub.KerasLayer(model_url, trainable=False, name='feature_extraction_layer', input_shape=IMAGE_SHAPE + (3,))
    model = tf.keras.Sequential([feature_extractor_layer, layers.Dense(num_classes, activation='softmax', name='output_layer')])
    return model

def plot_loss(history):
    model_df = pd.DataFrame(history.history)
    loss = model_df.loss
    val_loss = model_df.val_loss
    accuracy = model_df.accuracy
    val_accuracy = model_df.val_accuracy
    epochs = range(len(model_df.loss))

resnet_model = create_model(resnet_url, num_classes=train_data.num_classes)
resnet_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
resnet_history = resnet_model.fit(train_data, epochs=5, steps_per_epoch=len(train_data), validation_data=test_data, validation_steps=len(test_data))

efficientnet_model = create_model(efficientnet_url, num_classes=train_data.num_classes)
efficientnet_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
efficientnet_history = efficientnet_model.fit(train_data, epochs=5, steps_per_epoch=len(train_data), validation_data=test_data, validation_steps=len(test_data))
plot_loss(efficientnet_history)
code
72084578/cell_14
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_dir = '../input/yoga-poses-dataset/DATASET/TEST'

IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
train_data = train_datagen.flow_from_directory(train_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')
test_data = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')

efficientnet_url = 'https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1'
resnet_url = 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5'

def create_model(model_url, num_classes=5):
    # frozen TF Hub feature extractor with a trainable softmax head
    feature_extractor_layer = hub.KerasLayer(model_url, trainable=False, name='feature_extraction_layer', input_shape=IMAGE_SHAPE + (3,))
    model = tf.keras.Sequential([feature_extractor_layer, layers.Dense(num_classes, activation='softmax', name='output_layer')])
    return model

resnet_model = create_model(resnet_url, num_classes=train_data.num_classes)
resnet_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
resnet_history = resnet_model.fit(train_data, epochs=5, steps_per_epoch=len(train_data), validation_data=test_data, validation_steps=len(test_data))

efficientnet_model = create_model(efficientnet_url, num_classes=train_data.num_classes)
efficientnet_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
efficientnet_history = efficientnet_model.fit(train_data, epochs=5, steps_per_epoch=len(train_data), validation_data=test_data, validation_steps=len(test_data))
code
72084578/cell_12
[ "image_output_1.png" ]
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_dir = '../input/yoga-poses-dataset/DATASET/TEST'

IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
train_datagen = ImageDataGenerator(rescale=1 / 255.0)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
train_data = train_datagen.flow_from_directory(train_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')
test_data = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SHAPE, batch_size=BATCH_SIZE, class_mode='categorical')

efficientnet_url = 'https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1'
resnet_url = 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5'

def create_model(model_url, num_classes=5):
    # frozen TF Hub feature extractor with a trainable softmax head
    feature_extractor_layer = hub.KerasLayer(model_url, trainable=False, name='feature_extraction_layer', input_shape=IMAGE_SHAPE + (3,))
    model = tf.keras.Sequential([feature_extractor_layer, layers.Dense(num_classes, activation='softmax', name='output_layer')])
    return model

resnet_model = create_model(resnet_url, num_classes=train_data.num_classes)
resnet_model.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
resnet_history = resnet_model.fit(train_data, epochs=5, steps_per_epoch=len(train_data), validation_data=test_data, validation_steps=len(test_data))
code
72084578/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import os
import random

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

train_dir = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_dir = '../input/yoga-poses-dataset/DATASET/TEST'

def plot_yoga_images(train_dir):
    # pick one random image per class directory
    for i, col in enumerate(os.listdir(train_dir)):
        image = random.choice(os.listdir(train_dir + '/' + col))
        image_path = train_dir + '/' + col + '/' + image
        img = mpimg.imread(image_path) / 255
        plt.axis(False)
        i = i + 1

plot_yoga_images(train_dir)
code
128037854/cell_4
[ "text_plain_output_1.png" ]
# Start training the model: first 50 epochs
!python /kaggle/working/yolov5-6-1ming/train.py --img 544 --batch 16 --epochs 50 --data /kaggle/working/widerpersonming/WiderPerson/person.yaml --cfg /kaggle/working/widerpersonming/yolov5s_SE.yaml
code
128037854/cell_6
[ "text_plain_output_1.png" ]
import datetime
import os
import zipfile

def file2zip(packagePath, zipPath):
    """
    :param packagePath: folder to pack
    :param zipPath: path of the output zip archive
    :return:
    """
    zip = zipfile.ZipFile(zipPath, 'w', zipfile.ZIP_DEFLATED)
    for path, dirNames, fileNames in os.walk(packagePath):
        fpath = path.replace(packagePath, '')
        for name in fileNames:
            fullName = os.path.join(path, name)
            name = fpath + '\\' + name
            zip.write(fullName, name)
    zip.close()

if __name__ == '__main__':
    packagePath = '/kaggle/working/yolov5-6-1ming/runs/train/exp2'
    zipPath = '/kaggle/working/yolov5s_SE-50-544-simplified-one-layer-only-pedestrians.zip'
    if os.path.exists(zipPath):
        os.remove(zipPath)
    file2zip(packagePath, zipPath)
    print('Packaging complete')
    print(datetime.datetime.utcnow())
code
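A side note on file2zip above: it joins archive entry names with a literal backslash, which yields awkward entry names when the zip is created on Linux (as on Kaggle). A sketch of the same directory walk with portable entry names via os.path.relpath; folder_to_zip is a hypothetical alternative, not the notebook's code:

import os
import zipfile

def folder_to_zip(package_path, zip_path):
    # same traversal as file2zip, but with OS-portable archive entry names
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for path, _, file_names in os.walk(package_path):
            for name in file_names:
                full_name = os.path.join(path, name)
                # store each file relative to the package root
                zf.write(full_name, os.path.relpath(full_name, package_path))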
128037854/cell_2
[ "text_plain_output_1.png" ]
import os

filepath = '/kaggle/working/widerpersonming/WiderPerson/person.yaml'
datas = []
datas.append('train: /kaggle/working/widerpersonming/WiderPerson/train/images')
datas.append('\n')
datas.append('val: /kaggle/working/widerpersonming/WiderPerson/val/images')
datas.append('\n')
datas.append('nc: 1')
datas.append('\n')
datas.append("names: ['pedestrians']")
datas.append('\n')
print(datas)
with open(filepath, 'w') as f:
    f.writelines(datas)
code
128037854/cell_1
[ "text_plain_output_1.png" ]
import shutil

shutil.copytree('../input/yolov5-6-1ming-with-se', './yolov5-6-1ming')
shutil.copytree('../input/widerpersonming-only-pedestrians', './widerpersonming')
code
128037854/cell_7
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from IPython.display import FileLink

FileLink('yolov5s_SE-50-544-simplified-one-layer-only-pedestrians.zip')
code
128037854/cell_3
[ "text_plain_output_1.png" ]
import os

filepath = '/kaggle/working/widerpersonming/WiderPerson/person.yaml'
datas = []
datas.append('train: /kaggle/working/widerpersonming/WiderPerson/train/images')
datas.append('\n')
datas.append('val: /kaggle/working/widerpersonming/WiderPerson/val/images')
datas.append('\n')
datas.append('nc: 1')
datas.append('\n')
datas.append("names: ['pedestrians']")
datas.append('\n')
with open(filepath, 'w') as f:
    f.writelines(datas)

# read the file back to verify its contents
with open(filepath, 'r') as f:
    datas = f.readlines()
print(datas)
print(type(datas))
code
128025412/cell_4
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
display(df_iris.head(3))
display(df_iris.tail(3))
display(df_iris.describe())
code
128025412/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
code
128025412/cell_26
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

new_data = pd.DataFrame([[5.1, 3.5, 1.4, 0.2], [6.2, 2.8, 4.8, 1.8], [7.3, 3.0, 6.3, 2.5]])
new_data.columns = X.columns
display(new_data)
code
128025412/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
from pandas.plotting import andrews_curves
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer
from sklearn.metrics import RocCurveDisplay, classification_report
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
code
128025412/cell_11
[ "text_html_output_2.png", "text_html_output_1.png", "text_html_output_3.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pandas.plotting import andrews_curves
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

andrews_curves(df_iris.drop('Id', axis=1), 'Species')
plt.figure()
sns.pairplot(df_iris.drop('Id', axis=1), hue='Species', height=3, markers=['o', 's', 'D'])
plt.show()
code
128025412/cell_19
[ "image_output_1.png" ]
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_pred, y_test, target_names=list(le.classes_)))
code
128025412/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
display(X.head(3), y.head(3))
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
display(X_train.describe(), y_test.describe())
code
128025412/cell_15
[ "text_html_output_4.png", "text_html_output_2.png", "text_html_output_1.png", "text_html_output_3.png" ]
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train)
code
128025412/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import andrews_curves
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

andrews_curves(df_iris.drop('Id', axis=1), 'Species')
df_iris.drop('Id', axis=1).boxplot(by='Species', figsize=(15, 10))

model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_score = model.predict_proba(X_test)

def show_roc(class_of_interest):
    # one-vs-rest ROC curve for a single class
    label_binarizer = LabelBinarizer().fit(y_train)
    y_onehot_test = label_binarizer.transform(y_test)
    label_binarizer.transform([class_of_interest])
    class_id = np.flatnonzero(label_binarizer.classes_ == class_of_interest)[0]
    RocCurveDisplay.from_predictions(y_onehot_test[:, class_id], y_score[:, class_id], name=f'{class_of_interest} vs the rest')
    plt.axis('square')

show_roc('Iris-setosa')
show_roc('Iris-versicolor')
show_roc('Iris-virginica')
code
128025412/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import andrews_curves
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

plt.figure(figsize=(15, 10))
andrews_curves(df_iris.drop('Id', axis=1), 'Species')
plt.title('Andrews Curves Plot', fontsize=20, fontweight='bold')
plt.legend(loc=1, prop={'size': 15}, frameon=True, facecolor='white', edgecolor='black')
plt.show()
code
128025412/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

model = Pipeline([('scaler', StandardScaler()), ('classifier', LogisticRegression())])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_train['Species'] = le.inverse_transform(y_train['Species'])
y_test['Species'] = le.inverse_transform(y_test['Species'])
y_score = model.predict_proba(X_test)

new_data = pd.DataFrame([[5.1, 3.5, 1.4, 0.2], [6.2, 2.8, 4.8, 1.8], [7.3, 3.0, 6.3, 2.5]])
new_data.columns = X.columns
predictions = model.predict(new_data.values)
print(le.inverse_transform(predictions))
code
128025412/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pandas.plotting import andrews_curves
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer

df_iris = pd.DataFrame(pd.read_csv('/kaggle/input/iris/Iris.csv'))
df_iris.groupby('Species').size()
X = df_iris.iloc[:, 1:5]
y = pd.DataFrame(df_iris.iloc[:, 5])
le = LabelEncoder()
y['Species'] = le.fit_transform(y['Species'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)

andrews_curves(df_iris.drop('Id', axis=1), 'Species')
plt.figure()
df_iris.drop('Id', axis=1).boxplot(by='Species', figsize=(15, 10))
plt.show()
code
50242767/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lin_reg = LinearRegression()
lin_reg.fit(x, y)

poly_reg = PolynomialFeatures(degree=3)
x_poly = poly_reg.fit_transform(x)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)

x_grid = np.arange(min(x), max(x), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(x, y, color='red')
plt.plot(x, lin_reg2.predict(x_poly), color='blue')
plt.xlabel('Age')
plt.ylabel('Height')
plt.title('Polynomial Linear Regression')
code
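In cell_21 above, x_grid is computed but the curve is drawn against the raw x values, so it is only as smooth as the data points. A sketch of the smooth-grid plot the grid was presumably built for, reusing x, y, poly_reg and lin_reg2 from that cell:

import numpy as np
import matplotlib.pyplot as plt

# dense grid of ages for a smooth curve (assumes x, y, poly_reg, lin_reg2 above)
x_grid = np.arange(min(x), max(x), 0.1).reshape(-1, 1)
plt.scatter(x, y, color='red')
# evaluate the fitted degree-3 model on the dense grid
plt.plot(x_grid, lin_reg2.predict(poly_reg.transform(x_grid)), color='blue')
plt.xlabel('Age')
plt.ylabel('Height')
plt.title('Polynomial Regression (smooth grid)')
plt.show()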
50242767/cell_9
[ "text_html_output_1.png" ]
import os
import pandas as pd
from sklearn.linear_model import LinearRegression

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lin_reg = LinearRegression()
lin_reg.fit(x, y)
code
50242767/cell_25
[ "text_plain_output_1.png" ]
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
data[data['Age'] == 30]
code
50242767/cell_2
[ "text_plain_output_1.png" ]
import os
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
code
50242767/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
from sklearn.linear_model import LinearRegression

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lin_reg = LinearRegression()
lin_reg.fit(x, y)

plt.scatter(x, y, color='red')
plt.plot(x, lin_reg.predict(x), color='blue')
plt.xlabel('Age')
plt.ylabel('Height')
plt.title('Linear Regression for Height and Weight')
code
50242767/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

poly_reg = PolynomialFeatures(degree=3)
x_poly = poly_reg.fit_transform(x)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)
code
50242767/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
from sklearn.linear_model import LinearRegression

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lin_reg = LinearRegression()
lin_reg.fit(x, y)
lin_reg.predict([[30]])
code
50242767/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
data.head(4)
code
50242767/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lin_reg = LinearRegression()
lin_reg.fit(x, y)

poly_reg = PolynomialFeatures(degree=3)
x_poly = poly_reg.fit_transform(x)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)

x_grid = np.arange(min(x), max(x), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
lin_reg2.predict(poly_reg.fit_transform([[30]]))
code
50242767/cell_5
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/heightvsweight-for-linear-polynomial-regression/HeightVsWeight.csv')
sns.scatterplot(x='Age', y='Height', data=data)
code
88090787/cell_42
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)
code
88090787/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.describe()
code
88090787/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.info()
code
88090787/cell_4
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
code
88090787/cell_56
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

ages = np.zeros((2, 3))
ages
for d in data:
    # impute missing ages with the median age for each Sex x Pclass group
    for i in range(0, 2):
        for j in range(0, 3):
            age_data = d[(d['Sex'] == i) & (d['Pclass'] == j + 1)]['Age'].dropna()
            age = age_data.median()
            ages[i, j] = age
    for i in range(0, 2):
        for j in range(0, 3):
            d.loc[d['Age'].isnull() & (d['Sex'] == i) & (d['Pclass'] == j + 1), 'Age'] = ages[i, j]
    d['Age'] = d['Age'].astype(int)

# bin ages into five ordinal buckets
for d in data:
    d.loc[d.Age <= 16, 'Age'] = 0
    d.loc[(d.Age > 16) & (d.Age <= 32), 'Age'] = 1
    d.loc[(d.Age > 32) & (d.Age <= 48), 'Age'] = 2
    d.loc[(d.Age > 48) & (d.Age <= 64), 'Age'] = 3
    d.loc[d.Age > 64, 'Age'] = 4

train = train.drop('SegAge', axis=1)
data = [train, test]
train.head()
code
88090787/cell_33
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns

g = sns.FacetGrid(data=train, col='Survived')
g = g.map(plt.hist, 'Age', bins=25)
grid = sns.FacetGrid(data=train, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.7, bins=20)
grid.add_legend()
code
88090787/cell_44
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)
pd.crosstab(train['Title'], train['Sex'])
code
88090787/cell_55
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

ages = np.zeros((2, 3))
ages
for d in data:
    # impute missing ages with the median age for each Sex x Pclass group
    for i in range(0, 2):
        for j in range(0, 3):
            age_data = d[(d['Sex'] == i) & (d['Pclass'] == j + 1)]['Age'].dropna()
            age = age_data.median()
            ages[i, j] = age
    for i in range(0, 2):
        for j in range(0, 3):
            d.loc[d['Age'].isnull() & (d['Sex'] == i) & (d['Pclass'] == j + 1), 'Age'] = ages[i, j]
    d['Age'] = d['Age'].astype(int)

# bin ages into five ordinal buckets
for d in data:
    d.loc[d.Age <= 16, 'Age'] = 0
    d.loc[(d.Age > 16) & (d.Age <= 32), 'Age'] = 1
    d.loc[(d.Age > 32) & (d.Age <= 48), 'Age'] = 2
    d.loc[(d.Age > 48) & (d.Age <= 64), 'Age'] = 3
    d.loc[d.Age > 64, 'Age'] = 4

train.head()
code
88090787/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.head()
code
88090787/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train[['Parch', 'Survived']].groupby('Parch', as_index=False).mean().sort_values(by='Parch', ascending=True)
code
88090787/cell_39
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns

g = sns.FacetGrid(data=train, col='Survived')
g = g.map(plt.hist, 'Age', bins=25)
grid = sns.FacetGrid(data=train, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.7, bins=20)
grid.add_legend()
g = sns.FacetGrid(data=train, row='Embarked', size=2.2, aspect=1.6)
g.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
g.add_legend()
g = sns.FacetGrid(data=train, col='Survived', row='Embarked', size=2.2, aspect=2.2)
g.map(sns.barplot, 'Sex', 'Fare')
g.add_legend()
code
88090787/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train[['Sex', 'Survived']].groupby('Sex', as_index=False).mean().sort_values(by='Survived', ascending=False)
code
88090787/cell_48
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)
code
88090787/cell_54
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)
pd.crosstab(train['Title'], train['Sex'])

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

ages = np.zeros((2, 3))
ages
for d in data:
    # impute missing ages with the median age for each Sex x Pclass group
    for i in range(0, 2):
        for j in range(0, 3):
            age_data = d[(d['Sex'] == i) & (d['Pclass'] == j + 1)]['Age'].dropna()
            age = age_data.median()
            ages[i, j] = age
    for i in range(0, 2):
        for j in range(0, 3):
            d.loc[d['Age'].isnull() & (d['Sex'] == i) & (d['Pclass'] == j + 1), 'Age'] = ages[i, j]
    d['Age'] = d['Age'].astype(int)

# bin Age into five equal-width segments and inspect survival per segment
train['SegAge'] = pd.cut(train['Age'], 5)
train[['SegAge', 'Survived']].groupby('SegAge', as_index=False).mean()
code
88090787/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.info()
code
88090787/cell_50
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns

g = sns.FacetGrid(data=train, col='Survived')
g = g.map(plt.hist, 'Age', bins=25)
grid = sns.FacetGrid(data=train, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.7, bins=20)
grid.add_legend()
g = sns.FacetGrid(data=train, row='Embarked', size=2.2, aspect=1.6)
g.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
g.add_legend()
g = sns.FacetGrid(data=train, col='Survived', row='Embarked', size=2.2, aspect=2.2)
g.map(sns.barplot, 'Sex', 'Fare')
g.add_legend()

train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

g = sns.FacetGrid(train, col='Sex', row='Pclass', size=2.2, aspect=1.6)
g.map(plt.hist, 'Age', alpha=0.7, bins=20)
g.add_legend()
code
88090787/cell_52
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

ages = np.zeros((2, 3))
ages
for d in data:
    # impute missing ages with the median age for each Sex x Pclass group
    for i in range(0, 2):
        for j in range(0, 3):
            age_data = d[(d['Sex'] == i) & (d['Pclass'] == j + 1)]['Age'].dropna()
            age = age_data.median()
            ages[i, j] = age
    for i in range(0, 2):
        for j in range(0, 3):
            d.loc[d['Age'].isnull() & (d['Sex'] == i) & (d['Pclass'] == j + 1), 'Age'] = ages[i, j]
    d['Age'] = d['Age'].astype(int)

train.head()
code
88090787/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()
code
88090787/cell_49
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

train.head(10)
code
88090787/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.describe(include=['O'])
code
88090787/cell_51
[ "text_html_output_1.png" ]
import numpy as np

ages = np.zeros((2, 3))
ages
code
88090787/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train[['SibSp', 'Survived']].groupby('SibSp', as_index=False).mean().sort_values(by='SibSp', ascending=True)
code
88090787/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train.head()
code
88090787/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
code
88090787/cell_43
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

# extract the honorific (the word before the period) from each name
train['Title'] = train['Name'].str.extract('([A-Za-z]+)\\.')
test['Title'] = test['Name'].str.extract('([A-Za-z]+)\\.')
(train['Title'], test['Title'])
code
88090787/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
g = sns.FacetGrid(data=train, col='Survived')
g = g.map(plt.hist, 'Age', bins=25)
code
88090787/cell_24
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
88090787/cell_53
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train, test])
data.shape
train.columns
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
(train.shape, test.shape)

data = [train, test]
for d in data:
    d['Title'] = d['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer', 'Lady', 'Major', 'Sir', 'Rev'], 'Rare')
    d['Title'] = d['Title'].replace(['Mlle', 'Mme'], 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby('Title').mean()

titles = {'Master': 1, 'Miss': 2, 'Mr': 3, 'Mrs': 4, 'Ms': 5, 'Rare': 6}
for d in data:
    d['Title'] = d['Title'].map(titles)
    d['Title'] = d['Title'].fillna(0)

train = train.drop(['PassengerId', 'Name'], axis=1)
test = test.drop(['PassengerId', 'Name'], axis=1)
data = [train, test]
(train.shape, test.shape)

for i in data:
    i['Sex'] = i['Sex'].map({'male': 0, 'female': 1}).astype(int)

ages = np.zeros((2, 3))
ages
for d in data:
    # impute missing ages with the median age for each Sex x Pclass group
    for i in range(0, 2):
        for j in range(0, 3):
            age_data = d[(d['Sex'] == i) & (d['Pclass'] == j + 1)]['Age'].dropna()
            age = age_data.median()
            ages[i, j] = age
    for i in range(0, 2):
        for j in range(0, 3):
            d.loc[d['Age'].isnull() & (d['Sex'] == i) & (d['Pclass'] == j + 1), 'Age'] = ages[i, j]
    d['Age'] = d['Age'].astype(int)

(train['Age'].isnull().sum(), test['Age'].isnull().sum())
code
88090787/cell_37
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns

g = sns.FacetGrid(data=train, col='Survived')
g = g.map(plt.hist, 'Age', bins=25)
grid = sns.FacetGrid(data=train, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=0.7, bins=20)
grid.add_legend()
g = sns.FacetGrid(data=train, row='Embarked', size=2.2, aspect=1.6)
g.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')
g.add_legend()
code
88090787/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.head()
code
1006521/cell_4
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode

pd.options.mode.chained_assignment = None
init_notebook_mode(connected=True)

terror_data = pd.read_csv('../input/globalterrorismdb_0616dist.csv', encoding='ISO-8859-1', usecols=[0, 1, 2, 3, 8, 11, 13, 14, 35, 84, 100, 103])
terror_data = terror_data.rename(columns={'eventid': 'id', 'iyear': 'year', 'imonth': 'month', 'iday': 'day', 'country_txt': 'country', 'provstate': 'state', 'targtype1_txt': 'target', 'weaptype1_txt': 'weapon', 'nkill': 'fatalities', 'nwound': 'injuries'})
terror_data['fatalities'] = terror_data['fatalities'].fillna(0).astype(int)
terror_data['injuries'] = terror_data['injuries'].fillna(0).astype(int)

attacks_france = terror_data[terror_data.country == 'France']
terror_peryear = np.asarray(attacks_france.groupby('year').year.count())
terror_years = np.arange(1972, 2016)
terror_years = np.delete(terror_years, [23])

trace = [go.Scatter(x=terror_years, y=terror_peryear, mode='lines', line=dict(color='rgb(240, 140, 45)', width=3))]
layout = go.Layout(title='Terrorist Attacks by Year in France (1970-2015)', xaxis=dict(rangeslider=dict(thickness=0.05), showline=True, showgrid=False), yaxis=dict(range=[0.1, 425], showline=True, showgrid=False))
figure = dict(data=trace, layout=layout)
iplot(figure)
code
1006521/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode

pd.options.mode.chained_assignment = None
init_notebook_mode(connected=True)

terror_data = pd.read_csv('../input/globalterrorismdb_0616dist.csv', encoding='ISO-8859-1', usecols=[0, 1, 2, 3, 8, 11, 13, 14, 35, 84, 100, 103])
terror_data = terror_data.rename(columns={'eventid': 'id', 'iyear': 'year', 'imonth': 'month', 'iday': 'day', 'country_txt': 'country', 'provstate': 'state', 'targtype1_txt': 'target', 'weaptype1_txt': 'weapon', 'nkill': 'fatalities', 'nwound': 'injuries'})
terror_data['fatalities'] = terror_data['fatalities'].fillna(0).astype(int)
terror_data['injuries'] = terror_data['injuries'].fillna(0).astype(int)

attacks_france = terror_data[terror_data.country == 'France']
terror_peryear = np.asarray(attacks_france.groupby('year').year.count())
terror_years = np.arange(1972, 2016)
terror_years = np.delete(terror_years, [23])

trace = [go.Scatter(x=terror_years, y=terror_peryear, mode='lines', line=dict(color='rgb(240, 140, 45)', width=3))]
layout = go.Layout(title='Terrorist Attacks by Year in France (1970-2015)', xaxis=dict(rangeslider=dict(thickness=0.05), showline=True, showgrid=False), yaxis=dict(range=[0.1, 425], showline=True, showgrid=False))
figure = dict(data=trace, layout=layout)

attacks_france['text'] = attacks_france['date'].dt.strftime('%B %-d, %Y') + '<br>' + attacks_france['fatalities'].astype(str) + ' Killed, ' + attacks_france['injuries'].astype(str) + ' Injured'
fatality = dict(type='scattergeo', locationmode='USA-states', lon=attacks_france[attacks_france.fatalities > 0]['longitude'], lat=attacks_france[attacks_france.fatalities > 0]['latitude'], text=attacks_france[attacks_france.fatalities > 0]['text'], mode='markers', name='Fatalities', hoverinfo='text+name', marker=dict(size=attacks_france[attacks_france.fatalities > 0]['fatalities'] ** 0.255 * 8, opacity=0.95, color='rgb(240, 140, 45)'))
injury = dict(type='scattergeo', locationmode='USA-states', lon=attacks_france[attacks_france.fatalities == 0]['longitude'], lat=attacks_france[attacks_france.fatalities == 0]['latitude'], text=attacks_france[attacks_france.fatalities == 0]['text'], mode='markers', name='Injuries', hoverinfo='text+name', marker=dict(size=(attacks_france[attacks_france.fatalities == 0]['injuries'] + 1) ** 0.245 * 8, opacity=0.85, color='rgb(20, 150, 187)'))
layout = dict(title='Terrorist Attacks by Latitude/Longitude in France (1970-2015)', showlegend=True, legend=dict(x=0.85, y=0.4), geo=dict(scope='europe', projection=dict(type='albers usa'), showland=True, landcolor='rgb(250, 250, 250)', subunitwidth=1, subunitcolor='rgb(217, 217, 217)', countrywidth=1, countrycolor='rgb(217, 217, 217)', showlakes=True, lakecolor='rgb(255, 255, 255)'))
data = [fatality, injury]
figure = dict(data=data, layout=layout)
iplot(figure)
code
1006521/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1006521/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode(connected=True)
terror_data = pd.read_csv('../input/globalterrorismdb_0616dist.csv', encoding='ISO-8859-1', usecols=[0, 1, 2, 3, 8, 11, 13, 14, 35, 84, 100, 103])
terror_data = terror_data.rename(columns={'eventid': 'id', 'iyear': 'year', 'imonth': 'month', 'iday': 'day', 'country_txt': 'country', 'provstate': 'state', 'targtype1_txt': 'target', 'weaptype1_txt': 'weapon', 'nkill': 'fatalities', 'nwound': 'injuries'})
terror_data['fatalities'] = terror_data['fatalities'].fillna(0).astype(int)
terror_data['injuries'] = terror_data['injuries'].fillna(0).astype(int)
attacks_france = terror_data[terror_data.country == 'France']

# Yearly attack counts (np.delete drops a year that is absent from the grouped counts)
terror_peryear = np.asarray(attacks_france.groupby('year').year.count())
terror_years = np.arange(1972, 2016)
terror_years = np.delete(terror_years, [23])
trace = [go.Scatter(x=terror_years, y=terror_peryear, mode='lines', line=dict(color='rgb(240, 140, 45)', width=3))]
layout = go.Layout(title='Terrorist Attacks by Year in France (1970-2015)', xaxis=dict(rangeslider=dict(thickness=0.05), showline=True, showgrid=False), yaxis=dict(range=[0.1, 425], showline=True, showgrid=False))
figure = dict(data=trace, layout=layout)

# Map of attacks; hover text combines the attack date with casualty counts
attacks_france['text'] = attacks_france['date'].dt.strftime('%B %-d, %Y') + '<br>' + attacks_france['fatalities'].astype(str) + ' Killed, ' + attacks_france['injuries'].astype(str) + ' Injured'
fatality = dict(type='scattergeo', locationmode='USA-states', lon=attacks_france[attacks_france.fatalities > 0]['longitude'], lat=attacks_france[attacks_france.fatalities > 0]['latitude'], text=attacks_france[attacks_france.fatalities > 0]['text'], mode='markers', name='Fatalities', hoverinfo='text+name', marker=dict(size=attacks_france[attacks_france.fatalities > 0]['fatalities'] ** 0.255 * 8, opacity=0.95, color='rgb(240, 140, 45)'))
injury = dict(type='scattergeo', locationmode='USA-states', lon=attacks_france[attacks_france.fatalities == 0]['longitude'], lat=attacks_france[attacks_france.fatalities == 0]['latitude'], text=attacks_france[attacks_france.fatalities == 0]['text'], mode='markers', name='Injuries', hoverinfo='text+name', marker=dict(size=(attacks_france[attacks_france.fatalities == 0]['injuries'] + 1) ** 0.245 * 8, opacity=0.85, color='rgb(20, 150, 187)'))
layout = dict(title='Terrorist Attacks by Latitude/Longitude in France (1970-2015)', showlegend=True, legend=dict(x=0.85, y=0.4), geo=dict(scope='europe', projection=dict(type='albers usa'), showland=True, landcolor='rgb(250, 250, 250)', subunitwidth=1, subunitcolor='rgb(217, 217, 217)', countrywidth=1, countrycolor='rgb(217, 217, 217)', showlakes=True, lakecolor='rgb(255, 255, 255)'))
data = [fatality, injury]
figure = dict(data=data, layout=layout)

# Collapse the raw GTD target categories into ten coded groups
target_codes = []
for attack in attacks_france['target'].values:
    if attack in ['Business', 'Journalists & Media', 'NGO']:
        target_codes.append(1)
    elif attack in ['Government (General)', 'Government (Diplomatic)']:
        target_codes.append(2)
    elif attack == 'Abortion Related':
        target_codes.append(4)
    elif attack == 'Educational Institution':
        target_codes.append(5)
    elif attack == 'Police':
        target_codes.append(6)
    elif attack == 'Military':
        target_codes.append(7)
    elif attack == 'Religious Figures/Institutions':
        target_codes.append(8)
    elif attack in ['Airports & Aircraft', 'Maritime', 'Transportation']:
        target_codes.append(9)
    elif attack in ['Food or Water Supply', 'Telecommunication', 'Utilities']:
        target_codes.append(10)
    else:
        target_codes.append(3)
attacks_france['target'] = target_codes
target_categories = ['Business', 'Government', 'Individuals', 'Healthcare', 'Education', 'Police', 'Military', 'Religion', 'Transportation', 'Infrastructure']
target_count = np.asarray(attacks_france.groupby('target').target.count())
target_percent = np.round(target_count / sum(target_count) * 100, 2)
target_fatality = np.asarray(attacks_france.groupby('target')['fatalities'].sum())
target_yaxis = np.asarray([1.33, 2.36, 2.98, 0.81, 1.25, 1.71, 1.31, 1.53, 1.34, 0])
target_injury = np.asarray(attacks_france.groupby('target')['injuries'].sum())
target_xaxis = np.log10(target_injury)
target_text = []
for i in range(0, 10):
    target_text.append(target_categories[i] + ' (' + target_percent[i].astype(str) + '%)<br>' + target_fatality[i].astype(str) + ' Killed, ' + target_injury[i].astype(str) + ' Injured')
data = [go.Scatter(x=target_injury, y=target_fatality, text=target_text, mode='markers', hoverinfo='text', marker=dict(size=target_count / 6.5, opacity=0.9, color='rgb(240, 140, 45)'))]
layout = go.Layout(title='Terrorist Attacks by Target in France (1970-2015)', xaxis=dict(title='Injuries', type='log', range=[1.36, 3.25], tickmode='auto', nticks=2, showline=True, showgrid=False), yaxis=dict(title='Fatalities', type='log', range=[0.59, 3.45], tickmode='auto', nticks=4, showline=True, showgrid=False))
annotations = []
for i in range(0, 10):
    annotations.append(dict(x=target_xaxis[i], y=target_yaxis[i], xanchor='middle', yanchor='top', text=target_categories[i], showarrow=False))
layout['annotations'] = annotations
figure = dict(data=data, layout=layout)
iplot(figure)
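# Note: the cell above indexes attacks_france['date'], which is never built in the
# visible code of this record. A plausible construction from the year/month/day
# columns would be the following (hypothetical, not from the original notebook):
# attacks_france['date'] = pd.to_datetime(attacks_france[['year', 'month', 'day']], errors='coerce')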
code
72065241/cell_9
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sweetviz
train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
report = sweetviz.analyze([train_data, 'train'], 'target')
code
72065241/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72065241/cell_3
[ "text_html_output_1.png" ]
!pip install sweetviz
code
72065241/cell_14
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sweetviz
train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
report = sweetviz.analyze([train_data, 'train'], 'target')
my_report = sweetviz.compare([train_data, 'train'], [test_data, 'test'], 'target')
code
72065241/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sweetviz
train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
report = sweetviz.analyze([train_data, 'train'], 'target')
report.show_notebook()
code
130003745/cell_4
[ "text_plain_output_5.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_14.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "application_vnd.jupyter.stderr_output_20.png", "text_plain_output_23.png", "text_plain_output_28.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import fbeta_score, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from time import time
from xgboost import XGBClassifier
import numpy as np # linear algebra
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import numpy as np
import pandas as pd
import os

# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score, accuracy_score
from time import time

def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
    '''
    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: income training set
       - X_test: features testing set
       - y_test: income testing set
    '''
    results = {}
    # TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
    start = time()  # Get start time
    learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
    end = time()  # Get end time
    # TODO: Calculate the training time
    results['train_time'] = end - start
    # TODO: Get the predictions on the test set(X_test),
    # then get predictions on the first 300 training samples(X_train) using .predict()
    start = time()  # Get start time
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train[:300])
    end = time()  # Get end time
    # TODO: Calculate the total prediction time
    results['pred_time'] = end - start
    # TODO: Compute accuracy on the first 300 training samples which is y_train[:300]
    results['acc_train'] = accuracy_score(y_train[:300], predictions_train)
    # TODO: Compute accuracy on test set using accuracy_score()
    results['acc_test'] = accuracy_score(y_test, predictions_test)
    # TODO: Compute F-score on the first 300 training samples using fbeta_score()
    # results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta=0.5)
    # TODO: Compute F-score on the test set which is y_test
    # results['f_test'] = fbeta_score(y_test, predictions_test, beta=0.5)
    # Success
    print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
    # Return the results
    return results

def evaluate(results):
    """
    Visualization code to display results of various learners.

    inputs:
      - learners: a list of supervised learners
      - stats: a list of dictionaries of the statistic results from 'train_predict()'
      - accuracy: The score for the naive predictor
      - f1: The score for the naive predictor
    """
    # Create figure
    fig, ax = pl.subplots(2, 3, figsize=(11, 7))
    # Constants
    bar_width = 0.23
    colors = ['#A00000', '#00A0A0', '#00A000', '#ffffff']
    # Super loop to plot four panels of data
    for k, learner in enumerate(results.keys()):
        for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
            for i in np.arange(3):
                try:
                    # Creative plot code
                    ax[j // 3, j % 3].bar(i + k * bar_width, results[learner][i][metric], width=bar_width, color=colors[k])
                    ax[j // 3, j % 3].set_xticks([0.45, 1.45, 2.45])
                    ax[j // 3, j % 3].set_xticklabels(["1%", "10%", "100%"])
                    ax[j // 3, j % 3].set_xlabel("Training Set Size")
                    ax[j // 3, j % 3].set_xlim((-0.1, 3.0))
                except:
                    print('+++', learner, i, metric, '+++')
    # Add unique y-labels
    ax[0, 0].set_ylabel("Time (in seconds)")
    ax[0, 1].set_ylabel("Accuracy Score")
    ax[0, 2].set_ylabel("F-score")
    ax[1, 0].set_ylabel("Time (in seconds)")
    ax[1, 1].set_ylabel("Accuracy Score")
    ax[1, 2].set_ylabel("F-score")
    # Add titles
    ax[0, 0].set_title("Model Training")
    ax[0, 1].set_title("Accuracy Score on Training Subset")
    ax[0, 2].set_title("F-score on Training Subset")
    ax[1, 0].set_title("Model Predicting")
    ax[1, 1].set_title("Accuracy Score on Testing Set")
    ax[1, 2].set_title("F-score on Testing Set")
    # Add horizontal lines for naive predictors
    ax[0, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[1, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[0, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[1, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    # Set y-limits for score panels
    ax[0, 1].set_ylim((0, 1))
    ax[0, 2].set_ylim((0, 1))
    ax[1, 1].set_ylim((0, 1))
    ax[1, 2].set_ylim((0, 1))
    # Create patches for the legend
    patches = []
    for i, learner in enumerate(results.keys()):
        patches.append(mpatches.Patch(color=colors[i], label=learner))
    pl.legend(handles=patches, bbox_to_anchor=(-0.80, 2.53), loc='upper center', borderaxespad=0., ncol=3, fontsize='x-large')
    # Aesthetics
    pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize=16, y=1.10)
    pl.tight_layout()
    pl.show()

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
import pandas as pd
from sklearn.model_selection import train_test_split

def train_df(filename):
    # Read one CIC-DDoS2019 CSV and encode its labels: BENIGN -> 0, attacks -> 1..n
    df = pd.read_csv(filename, index_col=0)
    m = list(df[' Label'].value_counts().index)
    j = {}
    h = 1
    for i in m:
        if i == 'BENIGN':
            j[i] = 0
        else:
            j[i] = h
            h += 1
    df[' Label'] = [j[x] for x in df[' Label']]
    df = df.dropna()
    y = df[' Label']
    X = df.drop(columns=['Flow ID', ' Label', ' Source IP', ' Destination IP', ' Timestamp', 'SimillarHTTP']).clip(-1000000.0, 1000000.0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
    clf_A = GradientBoostingClassifier(random_state=42)
    clf_B = RandomForestClassifier(random_state=42)
    clf_C = KNeighborsClassifier()
    samples_100 = len(y_train)
    samples_10 = int(samples_100 * 0.1)
    samples_1 = int(samples_100 * 0.01)
    results = {}
    for clf in [clf_A, clf_B, clf_C]:
        clf_name = clf.__class__.__name__
        results[clf_name] = {}
        for i, samples in enumerate([samples_1, samples_10, samples_100]):
            results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_test, y_test)
    clf_D = LGBMClassifier(random_state=42)
    clf_E = CatBoostClassifier(random_state=42, verbose=False)
    clf_F = XGBClassifier(random_state=42)
    for clf in [clf_D, clf_E, clf_F]:
        clf_name = clf.__class__.__name__
        results[clf_name] = {}
        for i, samples in enumerate([samples_1, samples_10, samples_100]):
            results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_test, y_test)
    return results

import pickle
import os, operator, sys
dirpath = '/kaggle/input/ddos-evaluation-dataset-cic-ddos2019/CSV-01-12/01-12'
all_files = (os.path.join(basedir, filename) for basedir, dirs, files in os.walk(dirpath) for filename in files)
files_and_sizes = ((path, os.path.getsize(path)) for path in all_files)
sorted_files_with_size = sorted(files_and_sizes, key=operator.itemgetter(1))
sorted_files_with_size
# Skip files up to and including DrDoS_SSDP, then train on the rest and pickle each result
cont_flag = True
for filename, size in sorted_files_with_size:
    if cont_flag:
        if 'DrDoS_SSDP' in filename:
            cont_flag = False
        continue
    res = train_df(filename)
    pickle.dump(res, open('results_' + filename.split('/')[-1].replace('.csv', '') + '.p', 'wb'))
    print(filename.split('/')[-1].replace('.csv', ''), ' Done\n')
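# Hypothetical follow-up (not part of the original cell): each pickled results
# dict could be reloaded later, e.g. for plotting with evaluate():
# res = pickle.load(open('results_DrDoS_DNS.p', 'rb'))  # file name is illustrative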
code
130003745/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import fbeta_score, accuracy_score
from time import time
import numpy as np # linear algebra
from sklearn.metrics import fbeta_score, accuracy_score
from time import time

def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
    """
    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: income training set
       - X_test: features testing set
       - y_test: income testing set
    """
    results = {}
    start = time()
    learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
    end = time()
    results['train_time'] = end - start
    start = time()
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train[:300])
    end = time()
    results['pred_time'] = end - start
    results['acc_train'] = accuracy_score(y_train[:300], predictions_train)
    results['acc_test'] = accuracy_score(y_test, predictions_test)
    print('{} trained on {} samples.'.format(learner.__class__.__name__, sample_size))
    return results

def evaluate(results):
    """
    Visualization code to display results of various learners.

    inputs:
      - learners: a list of supervised learners
      - stats: a list of dictionaries of the statistic results from 'train_predict()'
      - accuracy: The score for the naive predictor
      - f1: The score for the naive predictor
    """
    fig, ax = pl.subplots(2, 3, figsize=(11, 7))
    bar_width = 0.23
    colors = ['#A00000', '#00A0A0', '#00A000', '#ffffff']
    for k, learner in enumerate(results.keys()):
        for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
            for i in np.arange(3):
                ax[j // 3, j % 3].bar(i + k * bar_width, results[learner][i][metric], width=bar_width, color=colors[k])
                ax[j // 3, j % 3].set_xticks([0.45, 1.45, 2.45])
                ax[j // 3, j % 3].set_xticklabels(['1%', '10%', '100%'])
                ax[j // 3, j % 3].set_xlabel('Training Set Size')
                ax[j // 3, j % 3].set_xlim((-0.1, 3.0))
    ax[0, 0].set_ylabel('Time (in seconds)')
    ax[0, 1].set_ylabel('Accuracy Score')
    ax[0, 2].set_ylabel('F-score')
    ax[1, 0].set_ylabel('Time (in seconds)')
    ax[1, 1].set_ylabel('Accuracy Score')
    ax[1, 2].set_ylabel('F-score')
    ax[0, 0].set_title('Model Training')
    ax[0, 1].set_title('Accuracy Score on Training Subset')
    ax[0, 2].set_title('F-score on Training Subset')
    ax[1, 0].set_title('Model Predicting')
    ax[1, 1].set_title('Accuracy Score on Testing Set')
    ax[1, 2].set_title('F-score on Testing Set')
    ax[0, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[1, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[0, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[1, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')
    ax[0, 1].set_ylim((0, 1))
    ax[0, 2].set_ylim((0, 1))
    ax[1, 1].set_ylim((0, 1))
    ax[1, 2].set_ylim((0, 1))
    patches = []
    for i, learner in enumerate(results.keys()):
        patches.append(mpatches.Patch(color=colors[i], label=learner))
    pl.legend(handles=patches, bbox_to_anchor=(-0.8, 2.53), loc='upper center', borderaxespad=0.0, ncol=3, fontsize='x-large')
    pl.suptitle('Performance Metrics for Three Supervised Learning Models', fontsize=16, y=1.1)
    pl.tight_layout()
    pl.show()
code
130003745/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
327044/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
code
327044/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
34120494/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
diabetes_data = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
diabetes_data[:60]
diabetes_data.shape
diabetes_data.isna().sum()
diabetes_data.dtypes
code
34120494/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.svm import LinearSVC
svc_model = LinearSVC(max_iter=10000)
svc_model.fit(X_train, y_train)
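# Note: X_train/y_train come from an earlier cell of this notebook that is not in
# this record. A plausible (hypothetical, not from the original) way they could
# have been created from the diabetes data:
# from sklearn.model_selection import train_test_split
# X = diabetes_data.drop(columns=['Outcome'])
# y = diabetes_data['Outcome']
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)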
code
34120494/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
diabetes_data = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
diabetes_data.head()
code
34120494/cell_7
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
import numpy as np
svc_model = LinearSVC(max_iter=10000)
svc_model.fit(X_train, y_train)
svc_score = svc_model.score(X_test, y_test)
if svc_score < 0.6:
    print(f'SVC Model Score is Less : {svc_score}')
else:
    random_clf = RandomForestClassifier(n_estimators=100)
    random_clf.fit(X_train, y_train)
    # One sample in the dataset's feature order (Pregnancies, Glucose, BloodPressure,
    # SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age)
    patient_sample = np.array([[0, 137, 40, 35, 168, 43.1, 2.244, 30]])
    prediction = random_clf.predict(patient_sample)
    if prediction == 0:
        print('You are not expected to be diabetic')
    elif prediction == 1:
        print('You are expected to be diabetic')
code
34120494/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
diabetes_data = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
diabetes_data[:60]
diabetes_data.shape
code
104119796/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/destiny-2-guns/guns.csv')
features = ['Element ', 'Rarity']
fig, ax = plt.subplots(1, len(features), figsize=(16, 5), sharex=False)
for cnt, feature in enumerate(['Element ', 'Rarity']):
    df.groupby(['weapon_type', feature]).size().unstack().plot(kind='barh', stacked=True, ax=ax[cnt])
plt.tight_layout()
plt.show()
code
104119796/cell_1
[ "text_plain_output_1.png" ]
import os
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
sns.set_style('darkgrid')
warnings.filterwarnings('ignore')
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
104119796/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
sns.set_style('darkgrid')
warnings.filterwarnings('ignore')
df = pd.read_csv('/kaggle/input/destiny-2-guns/guns.csv')

# count of weapons, breakdown by Element and Rarity
features = ['Element ', 'Rarity']
fig, ax = plt.subplots(1, len(features), figsize=(16, 5), sharex=False)
for cnt, feature in enumerate(['Element ', 'Rarity']):
    df.groupby(['weapon_type', feature]).size().unstack().plot(kind='barh', stacked=True, ax=ax[cnt])
plt.tight_layout()
plt.show()

# rate-of-fire distribution, broken down by each categorical feature
features = ['Element ', 'Rarity', 'weapon_type']
fig, ax = plt.subplots(2, 3, figsize=(18, 8))
for cnt, f in enumerate(features):
    current_col = int(cnt % 3)
    tmp = df[[f, 'gun_RoF']].sort_values(by=['gun_RoF'], ascending=True)
    sns.boxplot(ax=ax[0, current_col], data=tmp, x='gun_RoF', y=f, linewidth=2, showfliers=False)
    sns.histplot(ax=ax[1, current_col], data=tmp, x='gun_RoF', hue=f, kde=True, bins=20, legend=False, line_kws={'lw': 2}, alpha=0.5)
plt.tight_layout()
plt.show()
code
88103172/cell_4
[ "text_plain_output_1.png" ]
from aitextgen import aitextgen
from aitextgen.TokenDataset import TokenDataset
from aitextgen.tokenizers import train_tokenizer
from aitextgen.utils import GPTNeoConfigCPU
file_name = '/kaggle/input/annomi/dataset.txt'
train_tokenizer(file_name)
tokenizer_file = 'aitextgen.tokenizer.json'
config = GPTNeoConfigCPU()
ai = aitextgen(tokenizer_file=tokenizer_file, config=config)
data = TokenDataset(file_name, tokenizer_file=tokenizer_file, block_size=64)
code
88103172/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "application_vnd.jupyter.stderr_output_1.png" ]
from aitextgen import aitextgen
from aitextgen.TokenDataset import TokenDataset
from aitextgen.tokenizers import train_tokenizer
from aitextgen.utils import GPTNeoConfigCPU
file_name = '/kaggle/input/annomi/dataset.txt'
train_tokenizer(file_name)
tokenizer_file = 'aitextgen.tokenizer.json'
config = GPTNeoConfigCPU()
ai = aitextgen(tokenizer_file=tokenizer_file, config=config)
data = TokenDataset(file_name, tokenizer_file=tokenizer_file, block_size=64)
ai.train(data, batch_size=16, num_steps=50000, generate_every=1000, save_every=1000)
code
88103172/cell_1
[ "text_plain_output_1.png" ]
! pip install aitextgen
code
88103172/cell_7
[ "text_plain_output_1.png" ]
from aitextgen import aitextgen
ai2 = aitextgen(model_folder='./trained_model', tokenizer_file='aitextgen.tokenizer.json')
ai2.generate(10, prompt='<START>\nTHERAPIST:\nHi, Emily.\nCLIENT:\nHi. I am feeling low and have been drinking alcohol every day. What do you think that is happening\n')
code
50240545/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/catch-me-if-you-can/train_sessions.csv', index_col='session_id')
test_df = pd.read_csv('../input/catch-me-if-you-can/test_sessions.csv', index_col='session_id')
features = pd.DataFrame()
timepoints = train_df[['time%s' % i for i in range(1, 11)]]
sites = train_df[['site%s' % i for i in range(1, 11)]].fillna(0).astype(int).values
for td_index in range(1, 10):
    features['time_diff{}'.format(td_index)] = (pd.to_datetime(timepoints['time{}'.format(td_index + 1)]) - pd.to_datetime(timepoints['time{}'.format(td_index)])).dt.total_seconds().fillna(0)
features['time_of_session'] = np.sum(features, axis=1)
features['hour'] = pd.to_datetime(timepoints['time1']).dt.hour
features['day_of_week'] = pd.to_datetime(timepoints['time1']).dt.weekday
features['unique_sites'] = [len(np.unique(session[session != 0])) for session in sites]
plt.figure(figsize=(15, 10))
sns.distplot(features['time_of_session'], label='time')
plt.ylabel('Count')
plt.xlabel('Session duration')
plt.grid()
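# A hypothetical next step (not part of the original cell): the engineered
# features could feed a baseline classifier against the competition target, e.g.
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression().fit(features, train_df['target'])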
code
50240545/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
train_df = pd.read_csv('../input/catch-me-if-you-can/train_sessions.csv', index_col='session_id')
test_df = pd.read_csv('../input/catch-me-if-you-can/test_sessions.csv', index_col='session_id')
train_df.head()
code
50240545/cell_5
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/catch-me-if-you-can/train_sessions.csv', index_col='session_id')
test_df = pd.read_csv('../input/catch-me-if-you-can/test_sessions.csv', index_col='session_id')
features = pd.DataFrame()
timepoints = train_df[['time%s' % i for i in range(1, 11)]]
sites = train_df[['site%s' % i for i in range(1, 11)]].fillna(0).astype(int).values
for td_index in range(1, 10):
    features['time_diff{}'.format(td_index)] = (pd.to_datetime(timepoints['time{}'.format(td_index + 1)]) - pd.to_datetime(timepoints['time{}'.format(td_index)])).dt.total_seconds().fillna(0)
features['time_of_session'] = np.sum(features, axis=1)
features['hour'] = pd.to_datetime(timepoints['time1']).dt.hour
features['day_of_week'] = pd.to_datetime(timepoints['time1']).dt.weekday
features['unique_sites'] = [len(np.unique(session[session != 0])) for session in sites]
features.head()
code