path: string (lengths 13 to 17)
screenshot_names: sequence of strings (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 distinct value)
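For orientation, a minimal sketch of loading a dump with this schema into pandas; the file name cells.jsonl is a placeholder, not something this dump specifies.

import pandas as pd

# Hypothetical file name; the dump's actual storage format is not specified here.
df = pd.read_json('cells.jsonl', lines=True)
# Expected columns per the schema above.
assert list(df.columns) == ['path', 'screenshot_names', 'code', 'cell_type']
print(df['cell_type'].unique())  # a single class: 'code'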
128010348/cell_33
[ "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.models import Sequential, Model, load_model from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.preprocessing import MinMaxScaler from tensorflow.keras import regularizers from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate from tensorflow.keras.models import Model, Sequential from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.wrappers.scikit_learn import KerasClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape y_train_ex = to_categorical(y_train_ex) y_test_ex = to_categorical(y_test_ex) def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate): input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model model = KerasClassifier(build_fn=create_model, verbose=0) neurons = [64, 128, 256, 512, 1024, 2048] dropout_rate = [0, 0.25, 0.5, 0.75] kernel_regularizer = [0.01, 0.001, 0.0001] learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005] batch_size = [16, 32, 64] epochs = [50, 100, 150, 300, 500, 1000] param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs) n_iter_search = 50 random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy') random_search.fit(X_train_ex, y_train_ex) neurons = 1024 dropout_rate = 0.5 kernel_regularizer = 0.0001 learning_rate = 0.0001 batch_size = 32 epochs = 1000 input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) history = model.fit(X_train_ex, y_train_ex, validation_data=(X_test_ex, y_test_ex), epochs=epochs, batch_size=batch_size) '\nEpoch 1000/1000\n4/4 [==============================] - 0s 20ms/step - loss: 0.0115 - accuracy: 1.0000 - val_loss: 0.0114 - val_accuracy: 1.0000\n' print(history.history.keys()) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() plt.plot(history.history['accuracy']) plt.plot(history.history['loss']) plt.title('model accuracy vs loss') plt.xlabel('epoch') plt.legend(['accuracy', 'loss'], loc='upper left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() plt.plot(history.history['val_accuracy']) plt.plot(history.history['val_loss']) plt.title('model validation accuracy vs loss') plt.xlabel('epoch') plt.legend(['val_accuracy', 'val_loss'], loc='upper left') plt.show()
code
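The record above (and several later ones from the same kernel) calls to_categorical and references X_train_ex, X_test_ex, y_train_ex and y_test_ex without the cell that defines them, so that cell was evidently not captured in this dump. A plausible reconstruction under stated assumptions; the test fraction and random state are guesses, not the notebook's actual values.

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical  # the import these records rely on

# Assumed preprocessing: integer-encode the species labels and split 80/20.
X = df.drop('Species', axis=1).values
y = LabelEncoder().fit_transform(df['Species'])
X_train_ex, X_test_ex, y_train_ex, y_test_ex = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)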
128010348/cell_20
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape df.columns
code
128010348/cell_2
[ "image_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df.head()
code
128010348/cell_11
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.head()
code
128010348/cell_19
[ "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape sns.heatmap(df.corr(), annot=True) plt.title('Heatmap of correlation between variables', fontsize=16) plt.show()
code
128010348/cell_18
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape sns.countplot(x='Species', data=df) plt.show()
code
128010348/cell_32
[ "image_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.models import Sequential, Model, load_model from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from tensorflow.keras import regularizers from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate from tensorflow.keras.models import Model, Sequential from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.utils import plot_model from tensorflow.keras.wrappers.scikit_learn import KerasClassifier y_train_ex = to_categorical(y_train_ex) y_test_ex = to_categorical(y_test_ex) def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate): input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model model = KerasClassifier(build_fn=create_model, verbose=0) neurons = [64, 128, 256, 512, 1024, 2048] dropout_rate = [0, 0.25, 0.5, 0.75] kernel_regularizer = [0.01, 0.001, 0.0001] learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005] batch_size = [16, 32, 64] epochs = [50, 100, 150, 300, 500, 1000] param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs) n_iter_search = 50 random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy') random_search.fit(X_train_ex, y_train_ex) neurons = 1024 dropout_rate = 0.5 kernel_regularizer = 0.0001 learning_rate = 0.0001 batch_size = 32 epochs = 1000 input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) history = model.fit(X_train_ex, y_train_ex, validation_data=(X_test_ex, y_test_ex), epochs=epochs, batch_size=batch_size) '\nEpoch 1000/1000\n4/4 [==============================] - 0s 20ms/step - loss: 0.0115 - accuracy: 1.0000 - val_loss: 0.0114 - val_accuracy: 1.0000\n' plot_model(model, show_shapes=True)
code
128010348/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape print(df['Species'].unique()) print(df['Species'].value_counts())
code
128010348/cell_16
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape df.describe()
code
128010348/cell_38
[ "image_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.models import Sequential, Model, load_model from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.preprocessing import MinMaxScaler from tensorflow.keras import regularizers from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate from tensorflow.keras.models import Model, Sequential from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.wrappers.scikit_learn import KerasClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape y_train_ex = to_categorical(y_train_ex) y_test_ex = to_categorical(y_test_ex) def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate): input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model model = KerasClassifier(build_fn=create_model, verbose=0) neurons = [64, 128, 256, 512, 1024, 2048] dropout_rate = [0, 0.25, 0.5, 0.75] kernel_regularizer = [0.01, 0.001, 0.0001] learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005] batch_size = [16, 32, 64] epochs = [50, 100, 150, 300, 500, 1000] param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs) n_iter_search = 50 random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy') random_search.fit(X_train_ex, y_train_ex) neurons = 1024 dropout_rate = 0.5 kernel_regularizer = 0.0001 learning_rate = 0.0001 batch_size = 32 epochs = 1000 input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) history = model.fit(X_train_ex, y_train_ex, validation_data=(X_test_ex, y_test_ex), epochs=epochs, batch_size=batch_size) '\nEpoch 1000/1000\n4/4 [==============================] - 0s 20ms/step - loss: 0.0115 - accuracy: 1.0000 - val_loss: 0.0114 - val_accuracy: 1.0000\n' y_pred_proba = model.predict(X_test_ex) y_pred = np.argmax(y_pred_proba, axis=1) y_test_ex2 = y_test_ex y_test_ex = np.argmax(y_test_ex, axis=1) df_y_pred = pd.DataFrame(y_pred, columns=['Predicted']) df_y_test = pd.DataFrame(list(y_test_ex), columns=['Actual']) fig, ax = plt.subplots(1, 2, figsize=(15, 5)) sns.countplot(x=df_y_test['Actual'], palette='Set2', ax=ax[0]) sns.countplot(x=df_y_pred['Predicted'], palette='Set2', ax=ax[1]) fig.show()
code
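Side-by-side countplots only compare class frequencies, so a confusion matrix is a more direct check; a short sketch assuming the integer-label y_test_ex and y_pred arrays produced in the record above.

from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns

cm = confusion_matrix(y_test_ex, y_pred)  # rows: actual class, columns: predicted class
sns.heatmap(cm, annot=True, fmt='d')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()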
128010348/cell_35
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.models import Sequential, Model, load_model from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from tensorflow.keras import regularizers from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate from tensorflow.keras.models import Model, Sequential from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.wrappers.scikit_learn import KerasClassifier import numpy as np import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) y_train_ex = to_categorical(y_train_ex) y_test_ex = to_categorical(y_test_ex) def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate): input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model model = KerasClassifier(build_fn=create_model, verbose=0) neurons = [64, 128, 256, 512, 1024, 2048] dropout_rate = [0, 0.25, 0.5, 0.75] kernel_regularizer = [0.01, 0.001, 0.0001] learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005] batch_size = [16, 32, 64] epochs = [50, 100, 150, 300, 500, 1000] param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs) n_iter_search = 50 random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy') random_search.fit(X_train_ex, y_train_ex) neurons = 1024 dropout_rate = 0.5 kernel_regularizer = 0.0001 learning_rate = 0.0001 batch_size = 32 epochs = 1000 input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) history = model.fit(X_train_ex, y_train_ex, validation_data=(X_test_ex, y_test_ex), epochs=epochs, batch_size=batch_size) '\nEpoch 1000/1000\n4/4 [==============================] - 0s 20ms/step - loss: 0.0115 - accuracy: 1.0000 - val_loss: 0.0114 - val_accuracy: 1.0000\n' y_pred_proba = model.predict(X_test_ex) y_pred = np.argmax(y_pred_proba, axis=1)
code
128010348/cell_31
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.models import Sequential, Model, load_model from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from tensorflow.keras import regularizers from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate from tensorflow.keras.models import Model, Sequential from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.wrappers.scikit_learn import KerasClassifier y_train_ex = to_categorical(y_train_ex) y_test_ex = to_categorical(y_test_ex) def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate): input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model model = KerasClassifier(build_fn=create_model, verbose=0) neurons = [64, 128, 256, 512, 1024, 2048] dropout_rate = [0, 0.25, 0.5, 0.75] kernel_regularizer = [0.01, 0.001, 0.0001] learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005] batch_size = [16, 32, 64] epochs = [50, 100, 150, 300, 500, 1000] param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs) n_iter_search = 50 random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy') random_search.fit(X_train_ex, y_train_ex) neurons = 1024 dropout_rate = 0.5 kernel_regularizer = 0.0001 learning_rate = 0.0001 batch_size = 32 epochs = 1000 input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) history = model.fit(X_train_ex, y_train_ex, validation_data=(X_test_ex, y_test_ex), epochs=epochs, batch_size=batch_size) '\nEpoch 1000/1000\n4/4 [==============================] - 0s 20ms/step - loss: 0.0115 - accuracy: 1.0000 - val_loss: 0.0114 - val_accuracy: 1.0000\n'
code
128010348/cell_14
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) scaler = MinMaxScaler() cols_to_scale = df.columns[:-1] df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale) df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1) df = df_norm df.shape
code
128010348/cell_27
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from catboost import CatBoostClassifier, Pool from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier lr = LogisticRegression(C=1, solver='newton-cg', random_state=42) rf = RandomForestClassifier(n_estimators=50, max_depth=10, random_state=42) gb = GradientBoostingClassifier(n_estimators=10, learning_rate=0.005, random_state=42) svm = SVC(C=100, kernel='rbf', random_state=42) knn = KNeighborsClassifier(weights='uniform', n_neighbors=5, metric='manhattan') xgb = XGBClassifier(n_estimators=250, max_depth=10, learning_rate=0.0001, random_state=42) cat = CatBoostClassifier(iterations=50, max_depth=10, learning_rate=0.005, loss_function='MultiClass', eval_metric='MultiClass', random_state=42) dt = DecisionTreeClassifier(max_features='sqrt', max_depth=5, criterion='gini', ccp_alpha=0.001, random_state=42) lr.fit(X_train_ex, y_train_ex) rf.fit(X_train_ex, y_train_ex) gb.fit(X_train_ex, y_train_ex) svm.fit(X_train_ex, y_train_ex) knn.fit(X_train_ex, y_train_ex) xgb.fit(X_train_ex, y_train_ex) cat.fit(X_train_ex, y_train_ex) dt.fit(X_train_ex, y_train_ex)
code
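The record above fits eight classifiers but never scores them. A minimal evaluation sketch, assuming the X_test_ex/y_test_ex split sketched earlier and that the labels are still integers at this point:

from sklearn.metrics import accuracy_score

fitted = {'lr': lr, 'rf': rf, 'gb': gb, 'svm': svm, 'knn': knn, 'xgb': xgb, 'cat': cat, 'dt': dt}
for name, clf in fitted.items():
    y_hat = clf.predict(X_test_ex).ravel()  # ravel: CatBoost returns a column vector for MultiClass
    print(name, accuracy_score(y_test_ex, y_hat))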
128010348/cell_36
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.models import Sequential, Model, load_model from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from tensorflow.keras import regularizers from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate from tensorflow.keras.models import Model, Sequential from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.wrappers.scikit_learn import KerasClassifier import numpy as np import pandas as pd df = pd.read_csv('/kaggle/input/iris/Iris.csv') df = df.dropna() df = df.replace([np.inf, -np.inf], np.nan) df = df.dropna() df = df.drop(['Id'], axis=1) y_train_ex = to_categorical(y_train_ex) y_test_ex = to_categorical(y_test_ex) def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate): input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model model = KerasClassifier(build_fn=create_model, verbose=0) neurons = [64, 128, 256, 512, 1024, 2048] dropout_rate = [0, 0.25, 0.5, 0.75] kernel_regularizer = [0.01, 0.001, 0.0001] learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005] batch_size = [16, 32, 64] epochs = [50, 100, 150, 300, 500, 1000] param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs) n_iter_search = 50 random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy') random_search.fit(X_train_ex, y_train_ex) neurons = 1024 dropout_rate = 0.5 kernel_regularizer = 0.0001 learning_rate = 0.0001 batch_size = 32 epochs = 1000 input_shape = (X_train_ex.shape[1],) model = Sequential() model.add(Dense(neurons, activation='relu', input_shape=input_shape)) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 2, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 4, activation='relu')) model.add(Dropout(dropout_rate)) model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer))) model.add(Dropout(dropout_rate)) model.add(Dense(3, activation='softmax')) opt = Adam(learning_rate=learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) history = model.fit(X_train_ex, y_train_ex, validation_data=(X_test_ex, y_test_ex), epochs=epochs, batch_size=batch_size) '\nEpoch 1000/1000\n4/4 [==============================] - 0s 20ms/step - loss: 0.0115 - accuracy: 1.0000 - val_loss: 0.0114 - val_accuracy: 1.0000\n' y_pred_proba = model.predict(X_test_ex) y_pred = np.argmax(y_pred_proba, axis=1) y_pred
code
17133428/cell_11
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression import numpy as np import pandas as pd df = pd.read_csv('../input/raw_lemonade_data.csv') df['Date'] = pd.to_datetime(df['Date']) df['Price'] = df.Price.str.replace('$', '', regex=False).str.replace(' ', '', regex=False) df['Price'] = df.Price.astype(np.float64) df = df.set_index(df['Date']) df = df.drop('Date', axis=1) df['Revenue'] = df.Price * df.Sales df = df[['Revenue', 'Temperature', 'Rainfall', 'Flyers']] X = df[['Flyers']] y = df[['Revenue']] flyer_linear_model = LinearRegression().fit(X, y) print('Coefficient for flyers:', flyer_linear_model.coef_) print('Linear model coefficients are the weights of the model')
code
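The fitted model implies revenue = intercept + coefficient * flyers. A quick usage sketch; the flyer counts are illustrative, not from the data:

# Predict revenue for a few hypothetical flyer counts with the fitted model.
for flyers in [10, 40, 80]:
    revenue = flyer_linear_model.predict([[flyers]])[0][0]
    print(flyers, 'flyers -> predicted revenue', round(revenue, 2))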
17133428/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression import numpy as np import pandas as pd df = pd.read_csv('../input/raw_lemonade_data.csv') df['Date'] = pd.to_datetime(df['Date']) df['Price'] = df.Price.str.replace('$', '', regex=False).str.replace(' ', '', regex=False) df['Price'] = df.Price.astype(np.float64) df = df.set_index(df['Date']) df = df.drop('Date', axis=1) df['Revenue'] = df.Price * df.Sales df = df[['Revenue', 'Temperature', 'Rainfall', 'Flyers']] X = df[['Flyers']] y = df[['Revenue']] flyer_linear_model = LinearRegression().fit(X, y) print('Intercept:', flyer_linear_model.intercept_[0]) print("We'll sell about $4.18 in revenue at the minimum level of flyers")
code
17133428/cell_5
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd df = pd.read_csv('../input/raw_lemonade_data.csv') df['Date'] = pd.to_datetime(df['Date']) df['Price'] = df.Price.str.replace('$', '', regex=False).str.replace(' ', '', regex=False) df['Price'] = df.Price.astype(np.float64) df = df.set_index(df['Date']) df = df.drop('Date', axis=1) df['Revenue'] = df.Price * df.Sales df = df[['Revenue', 'Temperature', 'Rainfall', 'Flyers']] df.head()
code
73067513/cell_9
[ "text_plain_output_1.png" ]
s = 'Hi. Welcome to Filoger Bootcamp.' l = s.split(' ') l1 = s.split('.') print('Number of sentences in text:', len(l1) - 1)
code
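Splitting on '.' leaves a trailing empty piece, which is why the cell subtracts one; filtering empty pieces makes both counts explicit. A small sketch of the same idea:

s = 'Hi. Welcome to Filoger Bootcamp.'
sentences = [p for p in s.split('.') if p.strip()]  # drop the empty piece after the final '.'
words = s.split()
print('Number of sentences in text:', len(sentences))
print('Number of words in text:', len(words))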
73067513/cell_4
[ "text_plain_output_1.png" ]
s = 'Hi. Welcome to Filoger Bootcamp.' l = s.split(' ') print(l)
code
73067513/cell_6
[ "text_plain_output_1.png" ]
s = 'Hi. Welcome to Filoger Bootcamp.' l = s.split(' ') print('Number of words in text:', len(l))
code
73067513/cell_18
[ "text_plain_output_1.png" ]
def a(email): pass def a(speed): return speed > 80 a(100)
code
73067513/cell_15
[ "text_plain_output_1.png" ]
def my_func(str): return 'Iran' in str my_func('welcome to Iran')
code
73067513/cell_12
[ "text_plain_output_1.png" ]
def a(email): pass a('[email protected]') a('[email protected]')
code
17114755/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image from sklearn.model_selection import train_test_split from torch.autograd import Variable import matplotlib.pyplot as plt import numpy as np import pandas as pd import random import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') x_train = train.iloc[:, 1:].values / 255.0 y_train = train.iloc[:, 0].values x_test = test.values / 255 x_train = np.reshape(x_train, (-1, 1, 28, 28)) x_test = np.reshape(x_test, (-1, 1, 28, 28)) (x_train.shape, x_test.shape) random_seed = 234 x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed) (x_train.shape, x_val.shape, y_train.shape, y_val.shape) def display(rows, columns, images, values=[], predictions=[], save_image=False, filename='image.png', show=False): fig = plt.figure(figsize=(15, 15)) # plt.ioff() ax = [] for i in range( columns*rows ): img = images[i] ax.append(fig.add_subplot(rows, columns, i+1)) title = "" if(len(values) == 0 and len(predictions) != 0): title = "Pred:" + str(predictions[i]) elif(len(values) != 0 and len(predictions) == 0): title = "Value:" + str(values[i]) elif(len(values) != 0 and len(predictions) != 0): title = "\nValue:" + str(values[i]) + "\nPred:" + str(predictions[i]) ax[-1].set_title(title) # set title plt.imshow(img) plt.axis('off') if save_image: plt.savefig(filename) if not show: plt.close(fig) # plt.show() idx = np.random.randint(1, 1000, size=9) images = x_train[idx,:] images = images[:,0] values = y_train[idx] print(images.shape) display(rows=3, columns=3, images=images, values=values, predictions=[], save_image=True, filename='Autoencoder-validation.png', show=True) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') class customLoss(nn.Module): def __init__(self): super(customLoss, self).__init__() self.mse_loss = nn.MSELoss(reduction='sum') def forward(self, x_recon, x, mu, logvar): loss_MSE = self.mse_loss(x_recon, x) loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return loss_MSE + loss_KLD class VAE_CNN(nn.Module): def __init__(self): super(VAE_CNN, self).__init__() self.e_conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1, bias=False) self.e_bn1 = nn.BatchNorm2d(16) self.e_conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias=False) self.e_bn2 = nn.BatchNorm2d(32) self.e_conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) self.e_bn3 = nn.BatchNorm2d(64) self.e_conv4 = nn.Conv2d(64, 16, kernel_size=3, stride=2, padding=1, bias=False) self.e_bn4 = nn.BatchNorm2d(16) self.e_fc1 = nn.Linear(7 * 7 * 16, 512) self.e_fc_bn1 = nn.BatchNorm1d(512) self.e_fc2 = nn.Linear(512, 256) self.e_fc_bn2 = nn.BatchNorm1d(256) self.lv_fc1 = nn.Linear(256, 256) self.lv_fc2 = nn.Linear(256, 256) self.d_fc1 = nn.Linear(256, 512) self.d_fc_bn1 = nn.BatchNorm1d(512) self.d_fc2 = nn.Linear(512, 7 * 7 * 16) self.d_fc_bn2 = nn.BatchNorm1d(7 * 7 * 16) self.d_conv1 = nn.ConvTranspose2d(16, 64, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False) self.d_bn1 = nn.BatchNorm2d(64) self.d_conv2 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=1, padding=1, bias=False) self.d_bn2 = nn.BatchNorm2d(32) self.d_conv3 = nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False) self.d_bn3 = nn.BatchNorm2d(16) self.d_conv4 = nn.ConvTranspose2d(16, 1, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU() def encode(self, x, test=False): x = self.relu(self.e_bn1(self.e_conv1(x))) x = self.relu(self.e_bn2(self.e_conv2(x))) x = self.relu(self.e_bn3(self.e_conv3(x))) x = self.relu(self.e_bn4(self.e_conv4(x))) x = x.view(-1, 16 * 7 * 7) x = self.relu(self.e_fc_bn1(self.e_fc1(x))) x = self.relu(self.e_fc_bn2(self.e_fc2(x))) r1 = self.lv_fc1(x) r2 = self.lv_fc1(x) return (r1, r2) def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return mu def decode(self, z, test=False): z = self.relu(self.d_fc_bn1(self.d_fc1(z))) z = self.relu(self.d_fc_bn2(self.d_fc2(z))) z = z.view(-1, 16, 7, 7) z = self.relu(self.d_bn1(self.d_conv1(z))) z = self.relu(self.d_bn2(self.d_conv2(z))) z = self.relu(self.d_bn3(self.d_conv3(z))) z = self.d_conv4(z) return z def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) return (self.decode(z), mu, logvar) def print(self, t, test=False): pass def setup(self, train_data_loader, val_data_loader, epochs=10, log_interval=10): self.optimizer = optim.Adam(self.parameters(), lr=0.001) self.loss_mse = customLoss() self.epochs = epochs self.epoch = 0 self.train_data_loader = train_data_loader self.val_data_loader = val_data_loader self.log_interval = log_interval self.is_cuda = True def validate_epoch(self): it = iter(self.val_data_loader) ii = random.randint(1, 30) for i in range(ii): data, _ = it.next() output, _, _ = model(data.cuda()) data = data.numpy() output = output.cpu().data.numpy() data = data[:, 0] output = output[:, 0] images = [] for i in range(18): images.append(data[i]) images.append(output[i]) def train_epoch(self): self.train() train_loss = 0.0 for batch_idx, (data, _) in enumerate(self.train_data_loader): if self.is_cuda: data = data.cuda() self.optimizer.zero_grad() recon_batch, mu, logvar = self(data) loss = self.loss_mse(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() def train_model(self): self.train() self.validate_epoch() for i in range(self.epochs): self.train_epoch() self.epoch += 1 self.validate_epoch() model = VAE_CNN().cuda() torch_x_train = torch.from_numpy(x_train).type(torch.FloatTensor) torch_y_train = torch.from_numpy(y_train).type(torch.LongTensor) torch_x_val = torch.from_numpy(x_val).type(torch.FloatTensor) torch_y_val = torch.from_numpy(y_val).type(torch.LongTensor) torch_x_test = torch.from_numpy(x_test).type(torch.FloatTensor) train_dataset = torch.utils.data.TensorDataset(torch_x_train, torch_y_train) val_dataset = torch.utils.data.TensorDataset(torch_x_val, torch_y_val) test_dataset = torch.utils.data.TensorDataset(torch_x_test) train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=False) val_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=100, shuffle=False) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False) model = VAE_CNN().cuda() model.setup(train_data_loader, val_data_loader, epochs=100) import PIL images = [] values = [] for i in range(10): image = Image.open('Autoencoder-validation-' + str(i) + '.png') images.append(np.asarray(image)) values.append('Epoch ' + str(i)) class Classifier(nn.Module): def __init__(self, vae_cnn, demo=False): super(Classifier, self).__init__() self.demo = demo self.vae_cnn = vae_cnn self.classifier_fc1 = nn.Linear(256, 1024) self.classifier_fc_bn1 = nn.BatchNorm1d(1024) self.classifier_fc2 = nn.Linear(1024, 512) self.classifier_fc_bn2 = nn.BatchNorm1d(512) self.classifier_fc3 = nn.Linear(512, 256) self.classifier_fc_bn3 = nn.BatchNorm1d(256) self.classifier_fc4 = nn.Linear(256, 128) self.classifier_fc_bn4 = nn.BatchNorm1d(128) self.classifier_fc5 = nn.Linear(128, 10) def forward(self, x): mu, logvar = self.vae_cnn.encode(x) x = F.relu(self.classifier_fc_bn1(self.classifier_fc1(mu))) x = F.relu(self.classifier_fc_bn2(self.classifier_fc2(x))) x = F.relu(self.classifier_fc_bn3(self.classifier_fc3(x))) x = F.relu(self.classifier_fc_bn4(self.classifier_fc4(x))) x = self.classifier_fc5(x) return F.log_softmax(x, dim=1) def print(self, t): pass classifier = Classifier(model).cuda() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(classifier.parameters(), lr=0.1, momentum=0.9) with torch.no_grad(): correct_classification = 0 total = 0 id_codes = [] diagnosis = [] for i, data in enumerate(val_data_loader, 0): images, labels = data images_cuda = images.cuda() output_cuda = classifier(images_cuda) output = output_cuda.cpu().data _, predicted = torch.max(output, 1) correct_classification += torch.sum(labels == predicted).item() total += len(labels) predicted = predicted.cpu().numpy() print('Accuracy of the network %.2f %%' % (100 * correct_classification / total))
code
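Beyond feeding the classifier, the decoder half of the trained VAE above can generate digits by decoding random latent vectors; a short sampling sketch, assuming the trained model from this kernel (its latent size is 256 per the lv_fc1/lv_fc2 layers):

import torch

model.eval()  # use BatchNorm running statistics rather than batch statistics
with torch.no_grad():
    z = torch.randn(16, 256).cuda()  # 16 latent vectors of the model's latent size
    samples = model.decode(z)        # decoded images, shape (16, 1, 28, 28)
print(samples.shape)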
17114755/cell_4
[ "image_output_1.png" ]
import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') x_train = train.iloc[:, 1:].values / 255.0 y_train = train.iloc[:, 0].values x_test = test.values / 255 x_train = np.reshape(x_train, (-1, 1, 28, 28)) x_test = np.reshape(x_test, (-1, 1, 28, 28)) (x_train.shape, x_test.shape)
code
17114755/cell_20
[ "text_plain_output_1.png" ]
epochs = 100 for epoch in range(epochs): running_loss = 0.0 for i, data in enumerate(train_data_loader, 0): inputs, labels = data inputs, labels = (inputs.cuda(), labels.cuda()) optimizer.zero_grad() outputs = classifier(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() average_loss = running_loss / len(train_data_loader) print('[%d] loss: %.8f' % (epoch + 1, average_loss))
code
17114755/cell_6
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') x_train = train.iloc[:, 1:].values / 255.0 y_train = train.iloc[:, 0].values x_test = test.values / 255 x_train = np.reshape(x_train, (-1, 1, 28, 28)) x_test = np.reshape(x_test, (-1, 1, 28, 28)) (x_train.shape, x_test.shape) random_seed = 234 x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed) (x_train.shape, x_val.shape, y_train.shape, y_val.shape) def display(rows, columns, images, values=[], predictions=[], save_image=False, filename='image.png', show=False): fig = plt.figure(figsize=(15, 15)) ax = [] for i in range(columns * rows): img = images[i] ax.append(fig.add_subplot(rows, columns, i + 1)) title = '' if len(values) == 0 and len(predictions) != 0: title = 'Pred:' + str(predictions[i]) elif len(values) != 0 and len(predictions) == 0: title = 'Value:' + str(values[i]) elif len(values) != 0 and len(predictions) != 0: title = '\nValue:' + str(values[i]) + '\nPred:' + str(predictions[i]) ax[-1].set_title(title) plt.imshow(img) plt.axis('off') if save_image: plt.savefig(filename) if not show: plt.close(fig) idx = np.random.randint(1, 1000, size=9) images = x_train[idx, :] images = images[:, 0] values = y_train[idx] print(images.shape) display(rows=3, columns=3, images=images, values=values, predictions=[], save_image=True, filename='Autoencoder-validation.png', show=True)
code
17114755/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') print(train.shape, test.shape)
code
17114755/cell_1
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import random from sklearn.model_selection import train_test_split import struct import torch from PIL import Image import matplotlib.pyplot as plt import torchvision from torch.autograd import Variable import torch.optim as optim import torch.nn as nn import torch.nn.functional as F import copy import os print(os.listdir('../input'))
code
17114755/cell_7
[ "image_output_1.png" ]
import torch device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device)
code
17114755/cell_16
[ "text_plain_output_1.png" ]
from PIL import Image from sklearn.model_selection import train_test_split from torch.autograd import Variable import matplotlib.pyplot as plt import numpy as np import pandas as pd import random import torch import torch.nn as nn import torch.optim as optim train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') x_train = train.iloc[:, 1:].values / 255.0 y_train = train.iloc[:, 0].values x_test = test.values / 255 x_train = np.reshape(x_train, (-1, 1, 28, 28)) x_test = np.reshape(x_test, (-1, 1, 28, 28)) (x_train.shape, x_test.shape) random_seed = 234 x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed) (x_train.shape, x_val.shape, y_train.shape, y_val.shape) def display(rows, columns, images, values=[], predictions=[], save_image=False, filename='image.png', show=False): fig = plt.figure(figsize=(15, 15)) # plt.ioff() ax = [] for i in range( columns*rows ): img = images[i] ax.append(fig.add_subplot(rows, columns, i+1)) title = "" if(len(values) == 0 and len(predictions) != 0): title = "Pred:" + str(predictions[i]) elif(len(values) != 0 and len(predictions) == 0): title = "Value:" + str(values[i]) elif(len(values) != 0 and len(predictions) != 0): title = "\nValue:" + str(values[i]) + "\nPred:" + str(predictions[i]) ax[-1].set_title(title) # set title plt.imshow(img) plt.axis('off') if save_image: plt.savefig(filename) if not show: plt.close(fig) # plt.show() idx = np.random.randint(1, 1000, size=9) images = x_train[idx,:] images = images[:,0] values = y_train[idx] print(images.shape) display(rows=3, columns=3, images=images, values=values, predictions=[], save_image=True, filename='Autoencoder-validation.png', show=True) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') class customLoss(nn.Module): def __init__(self): super(customLoss, self).__init__() self.mse_loss = nn.MSELoss(reduction='sum') def forward(self, x_recon, x, mu, logvar): loss_MSE = self.mse_loss(x_recon, x) loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return loss_MSE + loss_KLD class VAE_CNN(nn.Module): def __init__(self): super(VAE_CNN, self).__init__() self.e_conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1, bias=False) self.e_bn1 = nn.BatchNorm2d(16) self.e_conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias=False) self.e_bn2 = nn.BatchNorm2d(32) self.e_conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) self.e_bn3 = nn.BatchNorm2d(64) self.e_conv4 = nn.Conv2d(64, 16, kernel_size=3, stride=2, padding=1, bias=False) self.e_bn4 = nn.BatchNorm2d(16) self.e_fc1 = nn.Linear(7 * 7 * 16, 512) self.e_fc_bn1 = nn.BatchNorm1d(512) self.e_fc2 = nn.Linear(512, 256) self.e_fc_bn2 = nn.BatchNorm1d(256) self.lv_fc1 = nn.Linear(256, 256) self.lv_fc2 = nn.Linear(256, 256) self.d_fc1 = nn.Linear(256, 512) self.d_fc_bn1 = nn.BatchNorm1d(512) self.d_fc2 = nn.Linear(512, 7 * 7 * 16) self.d_fc_bn2 = nn.BatchNorm1d(7 * 7 * 16) self.d_conv1 = nn.ConvTranspose2d(16, 64, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False) self.d_bn1 = nn.BatchNorm2d(64) self.d_conv2 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=1, padding=1, bias=False) self.d_bn2 = nn.BatchNorm2d(32) self.d_conv3 = nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False) self.d_bn3 = nn.BatchNorm2d(16) self.d_conv4 = nn.ConvTranspose2d(16, 1, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU() def encode(self, x, test=False): x = self.relu(self.e_bn1(self.e_conv1(x))) x = self.relu(self.e_bn2(self.e_conv2(x))) x = self.relu(self.e_bn3(self.e_conv3(x))) x = self.relu(self.e_bn4(self.e_conv4(x))) x = x.view(-1, 16 * 7 * 7) x = self.relu(self.e_fc_bn1(self.e_fc1(x))) x = self.relu(self.e_fc_bn2(self.e_fc2(x))) r1 = self.lv_fc1(x) r2 = self.lv_fc1(x) return (r1, r2) def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return mu def decode(self, z, test=False): z = self.relu(self.d_fc_bn1(self.d_fc1(z))) z = self.relu(self.d_fc_bn2(self.d_fc2(z))) z = z.view(-1, 16, 7, 7) z = self.relu(self.d_bn1(self.d_conv1(z))) z = self.relu(self.d_bn2(self.d_conv2(z))) z = self.relu(self.d_bn3(self.d_conv3(z))) z = self.d_conv4(z) return z def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) return (self.decode(z), mu, logvar) def print(self, t, test=False): pass def setup(self, train_data_loader, val_data_loader, epochs=10, log_interval=10): self.optimizer = optim.Adam(self.parameters(), lr=0.001) self.loss_mse = customLoss() self.epochs = epochs self.epoch = 0 self.train_data_loader = train_data_loader self.val_data_loader = val_data_loader self.log_interval = log_interval self.is_cuda = True def validate_epoch(self): it = iter(self.val_data_loader) ii = random.randint(1, 30) for i in range(ii): data, _ = it.next() output, _, _ = model(data.cuda()) data = data.numpy() output = output.cpu().data.numpy() data = data[:, 0] output = output[:, 0] images = [] for i in range(18): images.append(data[i]) images.append(output[i]) def train_epoch(self): self.train() train_loss = 0.0 for batch_idx, (data, _) in enumerate(self.train_data_loader): if self.is_cuda: data = data.cuda() self.optimizer.zero_grad() recon_batch, mu, logvar = self(data) loss = self.loss_mse(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() def train_model(self): self.train() self.validate_epoch() for i in range(self.epochs): self.train_epoch() self.epoch += 1 self.validate_epoch() model = VAE_CNN().cuda() import PIL images = [] values = [] for i in range(10): image = Image.open('Autoencoder-validation-' + str(i) + '.png') images.append(np.asarray(image)) values.append('Epoch ' + str(i)) display(rows=3, columns=3, images=images, values=values, predictions=[], show=True)
code
17114755/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') x_train = train.iloc[:, 1:].values / 255.0 y_train = train.iloc[:, 0].values x_test = test.values / 255 print((x_train.shape, y_train.shape), x_test.shape)
code
17114755/cell_14
[ "text_plain_output_1.png" ]
model.train_model()
code
17114755/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') x_train = train.iloc[:, 1:].values / 255.0 y_train = train.iloc[:, 0].values x_test = test.values / 255 x_train = np.reshape(x_train, (-1, 1, 28, 28)) x_test = np.reshape(x_test, (-1, 1, 28, 28)) (x_train.shape, x_test.shape) random_seed = 234 x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed) (x_train.shape, x_val.shape, y_train.shape, y_val.shape)
code
16166937/cell_21
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import PolynomialFeatures import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) y = df['low'].copy() y[df['medium'] == 1] = 2 y[df['high'] == 1] = 3 df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True) from sklearn.model_selection import train_test_split xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25) (xtr.shape, xte.shape, ytr.shape, yte.shape) from sklearn.preprocessing import PolynomialFeatures poly = PolynomialFeatures(degree=3) poly.fit(df) xtr2 = poly.transform(xtr) xtr2.shape lr2 = LogisticRegression() lr2.fit(xtr2, ytr)
code
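Degree-3 polynomial expansion grows the feature count combinatorially, which is what makes this model much heavier than the plain logistic regression above; the fitted transformer reports the expanded width directly (a sketch assuming poly and xtr2 from the record):

# For n input columns, degree-3 expansion (with bias) yields C(n + 3, 3) features.
print(poly.n_output_features_)
print(xtr2.shape)  # same width on the transformed training matrix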
16166937/cell_13
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) y = df['low'].copy() y[df['medium'] == 1] = 2 y[df['high'] == 1] = 3 df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True) from sklearn.model_selection import train_test_split xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25) (xtr.shape, xte.shape, ytr.shape, yte.shape) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(xtr, ytr)
code
16166937/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) y = df['low'].copy() y[df['medium'] == 1] = 2 y[df['high'] == 1] = 3 y.describe()
code
16166937/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') from sklearn.preprocessing import MinMaxScaler minmax = MinMaxScaler() df['average_montly_hours'] = minmax.fit_transform(df[['average_montly_hours']]) df['average_montly_hours'].describe()
code
16166937/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from sklearn.preprocessing import PolynomialFeatures import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) y = df['low'].copy() y[df['medium'] == 1] = 2 y[df['high'] == 1] = 3 df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True) from sklearn.model_selection import train_test_split xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25) (xtr.shape, xte.shape, ytr.shape, yte.shape) from sklearn.preprocessing import PolynomialFeatures poly = PolynomialFeatures(degree=3) poly.fit(df) xtr2 = poly.transform(xtr) xtr2.shape lr2 = LogisticRegression() lr2.fit(xtr2, ytr) xte2 = poly.transform(xte) xte2.shape lr2.fit(xtr2, ytr) y_pred2 = lr2.predict(xte2) print(classification_report(yte, y_pred2))
code
16166937/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary
code
16166937/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.head()
code
16166937/cell_11
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) y = df['low'].copy() y[df['medium'] == 1] = 2 y[df['high'] == 1] = 3 df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True) from sklearn.model_selection import train_test_split xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25) (xtr.shape, xte.shape, ytr.shape, yte.shape)
code
16166937/cell_19
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.preprocessing import PolynomialFeatures import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/HR_comma_sep.csv') df.columns sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) y = df['low'].copy() y[df['medium'] == 1] = 2 y[df['high'] == 1] = 3 df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True) from sklearn.model_selection import train_test_split xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25) (xtr.shape, xte.shape, ytr.shape, yte.shape) from sklearn.preprocessing import PolynomialFeatures poly = PolynomialFeatures(degree=3) poly.fit(df) xtr2 = poly.transform(xtr) xtr2.shape
code
16166937/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
16166937/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # np.arange is used below but numpy was not imported in the original cell

# `sales_salary` is the salary-by-department crosstab built in an earlier cell.
colormap = plt.cm.get_cmap('Greens')
fig, ax = plt.subplots(figsize=(12, 3))
plot = ax.pcolor(sales_salary.T, cmap=colormap, edgecolor='black')
ax.set_xlabel('sales')
ax.set_xticks(np.arange(len(sales_salary.index.values)) + 0.5)
ax.set_xticklabels(sales_salary.index.values)
ax.set_ylabel('salary')
ax.set_yticks(np.arange(len(sales_salary.columns.values)) + 0.5)
ax.set_yticklabels(sales_salary.columns.values)
cbar = fig.colorbar(plot)
cbar.ax.set_ylabel('quantity', rotation=360)
cbar.ax.get_yaxis().labelpad = 25
code
16166937/cell_18
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
poly = PolynomialFeatures(degree=3)
poly.fit(df)
code
16166937/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
df.head()
code
16166937/cell_15
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['low'].copy()
y[df['medium'] == 1] = 2
y[df['high'] == 1] = 3
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)
lr = LogisticRegression()
lr.fit(xtr, ytr)
confusion_matrix(yte, lr.predict(xte))
code
16166937/cell_16
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['low'].copy()
y[df['medium'] == 1] = 2
y[df['high'] == 1] = 3
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)
rf = RandomForestClassifier()
rf.fit(xtr, ytr)
y_pred = rf.predict(xte)
confusion_matrix(yte, y_pred)
code
16166937/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.describe()
code
16166937/cell_17
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['low'].copy()
y[df['medium'] == 1] = 2
y[df['high'] == 1] = 3
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)
rf = RandomForestClassifier()
rf.fit(xtr, ytr)
y_pred = rf.predict(xte)
confusion_matrix(yte, y_pred)
print(classification_report(yte, y_pred))
code
16166937/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['low'].copy()
y[df['medium'] == 1] = 2
y[df['high'] == 1] = 3
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)
lr = LogisticRegression()
lr.fit(xtr, ytr)
print(classification_report(yte, lr.predict(xte)))
code
16166937/cell_22
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['low'].copy()
y[df['medium'] == 1] = 2
y[df['high'] == 1] = 3
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)
poly = PolynomialFeatures(degree=3)
poly.fit(df)
xtr2 = poly.transform(xtr)
xtr2.shape
xte2 = poly.transform(xte)
xte2.shape
code
16166937/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
df.drop(labels=['low', 'medium', 'high'], axis=1, inplace=True)
df.head()
code
16166937/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/HR_comma_sep.csv')
df.columns
code
129002488/cell_21
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
accuracy = []
iter = 1  # note: this name shadows the built-in iter()
for i in range(31, 35):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    y_predi = knn.predict(X_test)
    score = accuracy_score(y_test, y_predi)
    accuracy.append(score * 100)
for i in accuracy:
    print('ACCURACY OF ' + str(iter) + 'th model is: ' + str(i))
    iter += 1
code
129002488/cell_13
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
code
129002488/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
df['employment_type'].value_counts()
code
129002488/cell_25
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
param_grid = {'n_estimators': [5, 7, 10, 11, 12],
              'criterion': ['gini', 'entropy', 'log_loss'],
              'max_depth': [7, 8, 9, 10],
              'max_features': ['sqrt', 'log2']}
cv = GridSearchCV(RandomForestClassifier(), param_grid=param_grid, refit=True, cv=5, verbose=0)
cv.fit(X_train, y_train)
y_predCV = cv.predict(X_test)
scoreRFC_CV = accuracy_score(y_test, y_predCV)
print('ACCURACY OF ' + 'the RFC model is: ' + str(scoreRFC_CV * 100) + '%')
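# Because refit=True, GridSearchCV retrains the best parameter combination
# on the whole training set, so `cv` is usable directly as a model above.
# The search results can then be inspected, e.g. (hedged aside, not part
# of the original cell):
# print(cv.best_params_, cv.best_score_)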
code
129002488/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.head()
code
129002488/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
rfc = RandomForestClassifier(max_depth=7)
rfc.fit(X_train, y_train)
y_predRFC = rfc.predict(X_test)
scoreRFC = accuracy_score(y_test, y_predRFC)
print('ACCURACY OF ' + 'the RandomForestClassifier model is: ' + str(scoreRFC * 100) + '%')
code
129002488/cell_20
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
catg = []
for i in df.columns:
    if df[i].dtype == 'O':
        catg.append(i)
correl = df.corr()
x = df.drop(['company_size'], axis=1)
y = df['company_size']
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
k = np.sqrt(df.shape[0])
k
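# A common rule of thumb starts the KNN neighbour count near
# sqrt(n_samples); k above is that heuristic value, and the later cells
# sweep candidate values of n_neighbors before fixing one.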
code
129002488/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
code
129002488/cell_26
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
param_grid = {'n_estimators': [5, 7, 10, 11, 12],
              'criterion': ['gini', 'entropy', 'log_loss'],
              'max_depth': [7, 8, 9, 10],
              'max_features': ['sqrt', 'log2']}
cv = GridSearchCV(RandomForestClassifier(), param_grid=param_grid, refit=True, cv=5, verbose=0)
cv.fit(X_train, y_train)
y_predCV = cv.predict(X_test)
scoreRFC_CV = accuracy_score(y_test, y_predCV)
cv.best_estimator_
code
129002488/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
df['experience_level'].unique()
code
129002488/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129002488/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
df['job_title'].value_counts()
code
129002488/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
catg = []
for i in df.columns:
    if df[i].dtype == 'O':
        catg.append(i)
correl = df.corr()
sns.heatmap(correl)
code
129002488/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
catg = []
for i in df.columns:
    if df[i].dtype == 'O':
        catg.append(i)
correl = df.corr()
df.head()
code
129002488/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
accuracy = []
iter = 1
for i in range(31, 35):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    y_predi = knn.predict(X_test)
    score = accuracy_score(y_test, y_predi)
    accuracy.append(score * 100)
for i in accuracy:
    iter += 1  # leftover counter from the sweep above; it has no effect here
knn = KNeighborsClassifier(n_neighbors=32)
knn.fit(X_train, y_train)
y_predi = knn.predict(X_test)
score = accuracy_score(y_test, y_predi)
print('ACCURACY OF ' + 'the KNeighboursClassifier model is: ' + str(score * 100) + '%')
code
129002488/cell_27
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
param_grid = {'n_estimators': [5, 7, 10, 11, 12],
              'criterion': ['gini', 'entropy', 'log_loss'],
              'max_depth': [7, 8, 9, 10],
              'max_features': ['sqrt', 'log2']}
cv = GridSearchCV(RandomForestClassifier(), param_grid=param_grid, refit=True, cv=5, verbose=0)
cv.fit(X_train, y_train)
y_predCV = cv.predict(X_test)
scoreRFC_CV = accuracy_score(y_test, y_predCV)
cv.best_estimator_
cv.fit(X_train, y_train)
y_predCV = cv.predict(X_test)
scoreRFC_CV = accuracy_score(y_test, y_predCV)
print('ACCURACY OF ' + 'the RFC model is: ' + str(scoreRFC_CV * 100) + '%')
code
129002488/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.isnull().sum().sum()
catg = []
for i in df.columns:
    if df[i].dtype == 'O':
        catg.append(i)
for i in catg:
    print('UNIQUE VALUES IN {} ARE:'.format(i))
    for i in df[i].unique():
        print(i, end=' ')
    print('\n')
code
129002488/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
df.info()
code
128024284/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
test[test.duplicated()]
code
128024284/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
fig, ax = plt.subplots(1, 3, figsize=(20, 6), sharey=True)
for i, col in enumerate(['fruitset', 'fruitmass', 'seeds']):
    sns.scatterplot(train, x=col, y='yield', ax=ax[i])
train[train.drop(columns=['yield']).duplicated()]
fig, ax = plt.subplots(2, 3, figsize=(20, 6))
ax = ax.flatten()
for i, a in enumerate(['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']):
    sns.countplot(x=train[a], ax=ax[i])
fig, ax = plt.subplots(1, 2, figsize=(10, 3))
for i, a in enumerate(['AverageRainingDays', 'RainingDays']):
    sns.countplot(x=train[a], ax=ax[i])
code
128024284/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
fig, ax = plt.subplots(1, 3, figsize=(20, 6), sharey=True)
for i, col in enumerate(['fruitset', 'fruitmass', 'seeds']):
    sns.scatterplot(train, x=col, y='yield', ax=ax[i])
train[train.drop(columns=['yield']).duplicated()]
fig, ax = plt.subplots(2, 3, figsize=(20, 6))
ax = ax.flatten()
for i, a in enumerate(['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']):
    sns.countplot(x=train[a], ax=ax[i])
code
128024284/cell_44
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import RepeatedKFold
from tqdm.notebook import tqdm
import catboost as cb
import lightgbm as lgbm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
import shap
import xgboost as xgb

RANDOM_STATE = 69
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
train[train.drop(columns=['yield']).duplicated()]
test[test.duplicated()]
for df in [train, test]:
    trange_cols = [col for col in train.columns if 'TRange' in col]
    rain_cols = [col for col in train.columns if 'RainingDays' in col]
    df['TSum'] = df[trange_cols].sum(axis=1)
    df['RSum'] = df[rain_cols].sum(axis=1)
    df.drop(columns=trange_cols + rain_cols, inplace=True)

def kfold(model, X, y, n_splits=5, n_repeats=5):
    kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=RANDOM_STATE)
    scores = np.zeros(n_splits * n_repeats)
    for i, (train_idx, val_idx) in tqdm(enumerate(kf.split(X, y)), total=n_splits * n_repeats):
        X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
        y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
        eval_set = [(X_val, y_val)]
        if isinstance(model, lgbm.sklearn.LGBMRegressor):
            callbacks = [lgbm.callback.early_stopping(100, verbose=False), lgbm.callback.log_evaluation(0)]
            model.fit(X_train, y_train, eval_set=eval_set, eval_metric='MAE', callbacks=callbacks)
        elif isinstance(model, cb.core.CatBoostRegressor):
            model.fit(X_train, y_train, eval_set=eval_set, early_stopping_rounds=100)
        else:
            model.fit(X_train, y_train)
        scores[i] = mean_absolute_error(y_val, model.predict(X_val))
    return np.mean(scores)

models = {'cb': cb.CatBoostRegressor(verbose=0, random_state=RANDOM_STATE, objective='MAE'),
          'lgbm': lgbm.LGBMRegressor(verbose=0, force_col_wise=True, random_state=RANDOM_STATE, objective='MAE'),
          'linreg': linear_model.LinearRegression(),
          'lasso': linear_model.LassoCV(),
          'ridge': linear_model.RidgeCV()}
X, y = (train.drop(columns=['yield']), train['yield'])
model_scores = dict()
for name, model in models.items():
    if name in ['cb', 'lgbm', 'xgb']:  # both branches currently make the same call
        model_scores[name] = kfold(model, X, y)
    else:
        model_scores[name] = kfold(model, X, y)
model = models['lgbm']
pred = model.predict(test)
submission = pd.DataFrame({'id': test.index, 'yield': pred}).set_index('id')
submission.to_csv('submission.csv')
code
128024284/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
fig, ax = plt.subplots(1, 3, figsize=(20, 6), sharey=True)
for i, col in enumerate(['fruitset', 'fruitmass', 'seeds']):
    sns.scatterplot(train, x=col, y='yield', ax=ax[i])
train[train.drop(columns=['yield']).duplicated()]
test[test.duplicated()]
fig, ax = plt.subplots(2, 3, figsize=(20, 6))
ax = ax.flatten()
for i, a in enumerate(['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']):
    sns.countplot(x=train[a], ax=ax[i])
fig, ax = plt.subplots(1, 2, figsize=(10, 3))
for i, a in enumerate(['AverageRainingDays', 'RainingDays']):
    sns.countplot(x=train[a], ax=ax[i])
for df in [train, test]:
    trange_cols = [col for col in train.columns if 'TRange' in col]
    rain_cols = [col for col in train.columns if 'RainingDays' in col]
    df['TSum'] = df[trange_cols].sum(axis=1)
    df['RSum'] = df[rain_cols].sum(axis=1)
    df.drop(columns=trange_cols + rain_cols, inplace=True)
fig, ax = plt.subplots(2, 1, figsize=(10, 6))
for i, a in enumerate(['TSum', 'RSum']):
    sns.countplot(y=train[a], ax=ax[i])
    ax[i].xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.2f}'.format(x)))
code
128024284/cell_39
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import RepeatedKFold
from tqdm.notebook import tqdm
import catboost as cb
import lightgbm as lgbm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
import shap
import xgboost as xgb

RANDOM_STATE = 69
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
train[train.drop(columns=['yield']).duplicated()]
test[test.duplicated()]
for df in [train, test]:
    trange_cols = [col for col in train.columns if 'TRange' in col]
    rain_cols = [col for col in train.columns if 'RainingDays' in col]
    df['TSum'] = df[trange_cols].sum(axis=1)
    df['RSum'] = df[rain_cols].sum(axis=1)
    df.drop(columns=trange_cols + rain_cols, inplace=True)

def kfold(model, X, y, n_splits=5, n_repeats=5):
    kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=RANDOM_STATE)
    scores = np.zeros(n_splits * n_repeats)
    for i, (train_idx, val_idx) in tqdm(enumerate(kf.split(X, y)), total=n_splits * n_repeats):
        X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
        y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
        eval_set = [(X_val, y_val)]
        if isinstance(model, lgbm.sklearn.LGBMRegressor):
            callbacks = [lgbm.callback.early_stopping(100, verbose=False), lgbm.callback.log_evaluation(0)]
            model.fit(X_train, y_train, eval_set=eval_set, eval_metric='MAE', callbacks=callbacks)
        elif isinstance(model, cb.core.CatBoostRegressor):
            model.fit(X_train, y_train, eval_set=eval_set, early_stopping_rounds=100)
        else:
            model.fit(X_train, y_train)
        scores[i] = mean_absolute_error(y_val, model.predict(X_val))
    return np.mean(scores)

models = {'cb': cb.CatBoostRegressor(verbose=0, random_state=RANDOM_STATE, objective='MAE'),
          'lgbm': lgbm.LGBMRegressor(verbose=0, force_col_wise=True, random_state=RANDOM_STATE, objective='MAE'),
          'linreg': linear_model.LinearRegression(),
          'lasso': linear_model.LassoCV(),
          'ridge': linear_model.RidgeCV()}
X, y = (train.drop(columns=['yield']), train['yield'])
model_scores = dict()
for name, model in models.items():
    if name in ['cb', 'lgbm', 'xgb']:
        model_scores[name] = kfold(model, X, y)
    else:
        model_scores[name] = kfold(model, X, y)
explainer = shap.TreeExplainer(models['cb'])
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values, X)
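# For a single-output regressor, TreeExplainer.shap_values(X) returns an
# array of shape (n_samples, n_features); summary_plot then ranks features
# by mean |SHAP value| and colours each point by the feature's value.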
code
128024284/cell_41
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import RepeatedKFold
from tqdm.notebook import tqdm
import catboost as cb
import lightgbm as lgbm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
import shap
import xgboost as xgb

RANDOM_STATE = 69
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
train[train.drop(columns=['yield']).duplicated()]
test[test.duplicated()]
for df in [train, test]:
    trange_cols = [col for col in train.columns if 'TRange' in col]
    rain_cols = [col for col in train.columns if 'RainingDays' in col]
    df['TSum'] = df[trange_cols].sum(axis=1)
    df['RSum'] = df[rain_cols].sum(axis=1)
    df.drop(columns=trange_cols + rain_cols, inplace=True)

def kfold(model, X, y, n_splits=5, n_repeats=5):
    kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=RANDOM_STATE)
    scores = np.zeros(n_splits * n_repeats)
    for i, (train_idx, val_idx) in tqdm(enumerate(kf.split(X, y)), total=n_splits * n_repeats):
        X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
        y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
        eval_set = [(X_val, y_val)]
        if isinstance(model, lgbm.sklearn.LGBMRegressor):
            callbacks = [lgbm.callback.early_stopping(100, verbose=False), lgbm.callback.log_evaluation(0)]
            model.fit(X_train, y_train, eval_set=eval_set, eval_metric='MAE', callbacks=callbacks)
        elif isinstance(model, cb.core.CatBoostRegressor):
            model.fit(X_train, y_train, eval_set=eval_set, early_stopping_rounds=100)
        else:
            model.fit(X_train, y_train)
        scores[i] = mean_absolute_error(y_val, model.predict(X_val))
    return np.mean(scores)

models = {'cb': cb.CatBoostRegressor(verbose=0, random_state=RANDOM_STATE, objective='MAE'),
          'lgbm': lgbm.LGBMRegressor(verbose=0, force_col_wise=True, random_state=RANDOM_STATE, objective='MAE'),
          'linreg': linear_model.LinearRegression(),
          'lasso': linear_model.LassoCV(),
          'ridge': linear_model.RidgeCV()}
X, y = (train.drop(columns=['yield']), train['yield'])
model_scores = dict()
for name, model in models.items():
    if name in ['cb', 'lgbm', 'xgb']:
        model_scores[name] = kfold(model, X, y)
    else:
        model_scores[name] = kfold(model, X, y)
explainer = shap.TreeExplainer(models['cb'])
shap_values = explainer.shap_values(X)
explainer = shap.TreeExplainer(models['lgbm'])
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values, X)
code
128024284/cell_2
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import shap
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
import xgboost as xgb
import catboost as cb
import lightgbm as lgbm

RANDOM_STATE = 69
code
128024284/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
train[train.drop(columns=['yield']).duplicated()]
code
128024284/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
code
128024284/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
fig, ax = plt.subplots(1, 3, figsize=(20, 6), sharey=True)
for i, col in enumerate(['fruitset', 'fruitmass', 'seeds']):
    sns.scatterplot(train, x=col, y='yield', ax=ax[i])
code
128024284/cell_35
[ "image_output_1.png" ]
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import RepeatedKFold
from tqdm.notebook import tqdm
import catboost as cb
import lightgbm as lgbm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
import shap
import xgboost as xgb

RANDOM_STATE = 69
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
train[train.drop(columns=['yield']).duplicated()]
test[test.duplicated()]
for df in [train, test]:
    trange_cols = [col for col in train.columns if 'TRange' in col]
    rain_cols = [col for col in train.columns if 'RainingDays' in col]
    df['TSum'] = df[trange_cols].sum(axis=1)
    df['RSum'] = df[rain_cols].sum(axis=1)
    df.drop(columns=trange_cols + rain_cols, inplace=True)

def kfold(model, X, y, n_splits=5, n_repeats=5):
    kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=RANDOM_STATE)
    scores = np.zeros(n_splits * n_repeats)
    for i, (train_idx, val_idx) in tqdm(enumerate(kf.split(X, y)), total=n_splits * n_repeats):
        X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
        y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
        eval_set = [(X_val, y_val)]
        if isinstance(model, lgbm.sklearn.LGBMRegressor):
            callbacks = [lgbm.callback.early_stopping(100, verbose=False), lgbm.callback.log_evaluation(0)]
            model.fit(X_train, y_train, eval_set=eval_set, eval_metric='MAE', callbacks=callbacks)
        elif isinstance(model, cb.core.CatBoostRegressor):
            model.fit(X_train, y_train, eval_set=eval_set, early_stopping_rounds=100)
        else:
            model.fit(X_train, y_train)
        scores[i] = mean_absolute_error(y_val, model.predict(X_val))
    return np.mean(scores)

models = {'cb': cb.CatBoostRegressor(verbose=0, random_state=RANDOM_STATE, objective='MAE'),
          'lgbm': lgbm.LGBMRegressor(verbose=0, force_col_wise=True, random_state=RANDOM_STATE, objective='MAE'),
          'linreg': linear_model.LinearRegression(),
          'lasso': linear_model.LassoCV(),
          'ridge': linear_model.RidgeCV()}
X, y = (train.drop(columns=['yield']), train['yield'])
model_scores = dict()
for name, model in models.items():
    if name in ['cb', 'lgbm', 'xgb']:
        model_scores[name] = kfold(model, X, y)
    else:
        model_scores[name] = kfold(model, X, y)
for name, score in model_scores.items():
    print(f'{name}: {score:.3f}')
code
128024284/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
code
128024284/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv', index_col='id')
original = pd.read_csv('/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv')
original['id'] = original['Row#']
original.drop(columns=['Row#'], inplace=True)
original.set_index('id', inplace=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(original.corr(), annot=True)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
sns.heatmap(train.corr() - original.corr(), annot=True)
code
90129110/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/yhydata/Dry_Bean.csv')
minmax = preprocessing.MinMaxScaler()
data_minmax = minmax.fit_transform(df.iloc[:, :-1])
le = preprocessing.LabelEncoder()
df['Class'] = le.fit_transform(df.iloc[:, -1])
label = df['Class']
df = pd.DataFrame(data_minmax, columns=df.columns[:-1])
df['Class'] = label
model = KNeighborsClassifier(n_neighbors=5)
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_knn = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
model = SVC()
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_svm = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
model = GaussianNB()
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_bayes = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
model = DecisionTreeClassifier()
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_tree = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
recall_noise = 0
noise_array_knn = []
epsilon_array = []
for r in range(1, 501):
    sensitivety = 1
    epsilon = r
    epsilon_array.append(1 / epsilon)
    noise = np.random.laplace(0.0, sensitivety / epsilon, len(df))
    df_noise = df.copy()
    for i in range(len(df.columns) - 1):
        for j in range(len(df)):
            df_noise.iloc[j, i] += noise[j]
    train_noise, _ = train_test_split(df_noise, test_size=0.2, random_state=7)
    model = KNeighborsClassifier(n_neighbors=5)
    model.fit(train_noise.iloc[:, :-1], train_noise.iloc[:, -1])
    predict = model.predict(test.iloc[:, :-1])
    noise_array_knn.append(metrics.recall_score(test.iloc[:, -1], predict, average='micro'))
plt.plot(noise_array_knn)
plt.axhline(recall_original_knn, color='red')
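# Laplace mechanism: adding Laplace(0, sensitivity / epsilon) noise makes a
# query with L1-sensitivity `sensitivety` epsilon-differentially private.
# Since epsilon = r runs from 1 to 500, the noise scale shrinks as 1/r and
# the noisy recall should approach the red noise-free baseline. Note that
# the same length-len(df) noise vector is reused for every feature column
# within one iteration.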
code
90129110/cell_27
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd

df = pd.read_csv('../input/yhydata/Dry_Bean.csv')
minmax = preprocessing.MinMaxScaler()
data_minmax = minmax.fit_transform(df.iloc[:, :-1])
le = preprocessing.LabelEncoder()
df['Class'] = le.fit_transform(df.iloc[:, -1])
label = df['Class']
df = pd.DataFrame(data_minmax, columns=df.columns[:-1])
df['Class'] = label
model = KNeighborsClassifier(n_neighbors=5)
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_knn = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
model = SVC()
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_svm = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
model = GaussianNB()
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_bayes = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
model = DecisionTreeClassifier()
model.fit(train.iloc[:, :-1], train.iloc[:, -1])
predict = model.predict(test.iloc[:, :-1])
recall_original_tree = metrics.recall_score(test.iloc[:, -1], predict, average='micro')
recall_noise = 0
noise_array_knn = []
epsilon_array = []
for r in range(1, 501):
    sensitivety = 1
    epsilon = r
    epsilon_array.append(1 / epsilon)
    noise = np.random.laplace(0.0, sensitivety / epsilon, len(df))
    df_noise = df.copy()
    for i in range(len(df.columns) - 1):
        for j in range(len(df)):
            df_noise.iloc[j, i] += noise[j]
    train_noise, _ = train_test_split(df_noise, test_size=0.2, random_state=7)
    model = KNeighborsClassifier(n_neighbors=5)
    model.fit(train_noise.iloc[:, :-1], train_noise.iloc[:, -1])
    predict = model.predict(test.iloc[:, :-1])
    noise_array_knn.append(metrics.recall_score(test.iloc[:, -1], predict, average='micro'))
print(max(noise_array_knn))
print(min(noise_array_knn))
code
2040512/cell_13
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
daily_Data['a'] = daily_Data.AppointmentDay.apply(lambda x: x.split('T')[0])
daily_Data['s'] = daily_Data.ScheduledDay.apply(lambda x: x.split('T')[0])
daily_Data.dtypes
code
2040512/cell_9
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
daily_Data.head()
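# LabelEncoder maps the sorted unique values to 0..n_classes-1, so Gender
# becomes 'F' -> 0, 'M' -> 1 and No-show becomes 'No' -> 0, 'Yes' -> 1.
# Each new fit overwrites le.classes_, so after this cell only the
# Neighbourhood mapping is still inspectable on `le`.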
code
2040512/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
(daily_Data['Gender'] == 'F').value_counts()
code
2040512/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
f, ax = plt.subplots(figsize=(15, 10))
sn.countplot(y='Handcap', data=daily_Data, color='c')
code
2040512/cell_11
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
daily_Data['a'] = daily_Data.AppointmentDay.apply(lambda x: x.split('T')[0])
daily_Data['a'].head(2)
code
2040512/cell_7
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
daily_Data.head()
code
2040512/cell_8
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
print('Age:', sorted(daily_Data.Age.unique()))
print('Gender:', daily_Data.Gender.unique())
print('Neighbourhood', daily_Data.Neighbourhood.unique())
print('Scholarship:', daily_Data.Scholarship.unique())
print('Hipertension:', daily_Data.Hipertension.unique())
print('Diabetes:', daily_Data.Diabetes.unique())
print('Alcoholism:', daily_Data.Alcoholism.unique())
print('Handcap:', daily_Data.Handcap.unique())
print('SMS_received:', daily_Data.SMS_received.unique())
code
2040512/cell_16
[ "image_output_1.png" ]
from datetime import datetime
from sklearn import preprocessing
import calendar
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
daily_Data['a'] = daily_Data.AppointmentDay.apply(lambda x: x.split('T')[0])
daily_Data['s'] = daily_Data.ScheduledDay.apply(lambda x: x.split('T')[0])
daily_Data.dtypes
daily_Data['weekday'] = daily_Data.a.apply(lambda dateString: calendar.day_name[datetime.strptime(dateString, '%Y-%m-%d').weekday()])
daily_Data.head()
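# datetime.strptime(dateString, '%Y-%m-%d').weekday() returns 0 for Monday
# through 6 for Sunday, and calendar.day_name maps that index to the English
# day name, so the new 'weekday' column holds strings like 'Monday'.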
code
2040512/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
daily_Data.head()
code
2040512/cell_17
[ "text_html_output_1.png" ]
from datetime import datetime
from sklearn import preprocessing
import calendar
import pandas as pd

daily_Data = pd.read_csv('../input/KaggleV2-May-2016.csv')
le = preprocessing.LabelEncoder()
le.fit(daily_Data['Gender'])
daily_Data['Gender'] = le.transform(daily_Data['Gender'])
le.fit(daily_Data['No-show'])
daily_Data['No-show'] = le.transform(daily_Data['No-show'])
le.fit(daily_Data['Neighbourhood'])
daily_Data['Neighbourhood'] = le.transform(daily_Data['Neighbourhood'])
daily_Data['a'] = daily_Data.AppointmentDay.apply(lambda x: x.split('T')[0])
daily_Data['s'] = daily_Data.ScheduledDay.apply(lambda x: x.split('T')[0])
daily_Data.dtypes
daily_Data['weekday'] = daily_Data.a.apply(lambda dateString: calendar.day_name[datetime.strptime(dateString, '%Y-%m-%d').weekday()])
daily_Data.Age.unique()
code