path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string class (1 value)
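The schema above describes a dump of Kaggle notebook cells: each record pairs a path of the form <notebook_id>/cell_<n>, the file names of the cell's rendered output screenshots, the cell's source code, and its cell_type (a single class, 'code'). A minimal sketch of iterating such records, assuming the dump has been exported as a JSON-lines file named cells.jsonl (a hypothetical file name, not part of the dataset itself):

import json

# Stream records from a hypothetical JSONL export of this schema and
# split each path into its notebook id and cell id.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        notebook_id, cell_id = record['path'].split('/')
        print(notebook_id, cell_id, record['cell_type'], len(record['code']))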
90107080/cell_31
[ "text_plain_output_1.png" ]
from joblib import dump, load
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
train.columns
X = train.drop(['satisfaction'], axis=1)
y = train['satisfaction']
clf_lr = LogisticRegression(solver='liblinear')
clf_lr.fit(X, y)
dump(clf_lr, 'filename.joblib')
code
90107080/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
train.columns
X = train.drop(['satisfaction'], axis=1)
y = train['satisfaction']
clf_lr = LogisticRegression(solver='liblinear')
clf_lr.fit(X, y)
code
90107080/cell_27
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from scipy import stats
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from graphviz import Source
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings

warnings.filterwarnings('ignore')
sns.set(style='darkgrid')
plt.style.use('fivethirtyeight')

train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)

def dataset_overview(data, col):
    pass

test = test.drop('Unnamed: 0', axis=1)
test = test.fillna(test.mean())

def correlation_matrix(data):
    corr = data.corr().round(2)
    # Mask for the upper triangle (np.bool in the original; plain bool on NumPy >= 1.24)
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set figure size
    f, ax = plt.subplots(figsize=(20, 20))
    # Define custom colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap
    d = sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,
                    square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
    plt.tight_layout()
    return d

def label_encoding(data, col):
    label_encoder = preprocessing.LabelEncoder()
    data[col] = label_encoder.fit_transform(data[col])
    return

label_encoding(test, 'Gender')
label_encoding(test, 'Customer Type')
label_encoding(test, 'Type of Travel')
label_encoding(test, 'satisfaction')
label_encoding(test, 'Class')
code
90107080/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv', index_col=None)
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv', index_col=None)
train = train.drop('Unnamed: 0', axis=1)
train = train.fillna(train.mean())
for _ in train.columns:
    print('The number of null values in:{} == {}'.format(_, train[_].isnull().sum()))
code
18152915/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

def map_atom_info(df, atom_idx):
    df = pd.merge(df, structures, how='left',
                  left_on=['molecule_name', f'atom_index_{atom_idx}'],
                  right_on=['molecule_name', 'atom_index'])
    df = df.drop('atom_index', axis=1)
    df = df.rename(columns={'atom': f'atom_{atom_idx}', 'x': f'x_{atom_idx}',
                            'y': f'y_{atom_idx}', 'z': f'z_{atom_idx}'})
    return df

train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
train.describe()
code
18152915/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')
test.head()
code
18152915/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

def map_atom_info(df, atom_idx):
    df = pd.merge(df, structures, how='left',
                  left_on=['molecule_name', f'atom_index_{atom_idx}'],
                  right_on=['molecule_name', 'atom_index'])
    df = df.drop('atom_index', axis=1)
    df = df.rename(columns={'atom': f'atom_{atom_idx}', 'x': f'x_{atom_idx}',
                            'y': f'y_{atom_idx}', 'z': f'z_{atom_idx}'})
    return df

train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
train.head()
code
18152915/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

def map_atom_info(df, atom_idx):
    df = pd.merge(df, structures, how='left',
                  left_on=['molecule_name', f'atom_index_{atom_idx}'],
                  right_on=['molecule_name', 'atom_index'])
    df = df.drop('atom_index', axis=1)
    df = df.rename(columns={'atom': f'atom_{atom_idx}', 'x': f'x_{atom_idx}',
                            'y': f'y_{atom_idx}', 'z': f'z_{atom_idx}'})
    return df

train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
train.isnull().sum()

train['molecule_name_unique'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['molecule_name'].nunique())
test['molecule_name_unique'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['molecule_name'].nunique())
train['molecule_name_type'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['type'].nunique())
test['molecule_name_type'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['type'].nunique())
train['molecule_dist_mean'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['dist'].mean())
test['molecule_dist_mean'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['dist'].mean())
train['molecule_dist_sum'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['dist'].sum())
test['molecule_dist_sum'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['dist'].sum())
train['molecule_dist_min'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['dist'].min())
test['molecule_dist_min'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['dist'].min())
train['molecule_atom_count'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['atom_1'].count())
test['molecule_atom_count'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['atom_1'].count())
train['molecule_atom_u'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['atom_1'].nunique())
test['molecule_atom_u'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['atom_1'].nunique())
train['type_unique'] = train['type'].map(train.groupby(train['type'])['type'].nunique())
test['type_unique'] = test['type'].map(test.groupby(test['type'])['type'].nunique())
train['type_dist_mean'] = train['type'].map(train.groupby(train['type'])['dist'].mean())
test['type_dist_mean'] = test['type'].map(test.groupby(test['type'])['dist'].mean())
train['type_dist_sum'] = train['type'].map(train.groupby(train['type'])['dist'].sum())
test['type_dist_sum'] = test['type'].map(test.groupby(test['type'])['dist'].sum())
train['type_dist_min'] = train['type'].map(train.groupby(train['type'])['dist'].min())
test['type_dist_min'] = test['type'].map(test.groupby(test['type'])['dist'].min())
train['type_atom_count'] = train['type'].map(train.groupby(train['type'])['atom_1'].count())
test['type_atom_count'] = test['type'].map(test.groupby(test['type'])['atom_1'].count())
train['type_atom_u'] = train['type'].map(train.groupby(train['type'])['atom_1'].nunique())
test['type_atom_u'] = test['type'].map(test.groupby(test['type'])['atom_1'].nunique())
train.head()
code
18152915/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
18152915/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

print(f'There are {train.shape[0]} rows in train data.')
print(f'There are {test.shape[0]} rows in test data.')
print(f"There are {train['molecule_name'].nunique()} distinct molecules in train data.")
print(f"There are {test['molecule_name'].nunique()} distinct molecules in test data.")
print(f"There are {train['atom_index_0'].nunique()} unique atoms.")
print(f"There are {train['type'].nunique()} unique types.")
code
18152915/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')
len(structures)
code
18152915/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

def map_atom_info(df, atom_idx):
    df = pd.merge(df, structures, how='left',
                  left_on=['molecule_name', f'atom_index_{atom_idx}'],
                  right_on=['molecule_name', 'atom_index'])
    df = df.drop('atom_index', axis=1)
    df = df.rename(columns={'atom': f'atom_{atom_idx}', 'x': f'x_{atom_idx}',
                            'y': f'y_{atom_idx}', 'z': f'z_{atom_idx}'})
    return df

train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
train.isnull().sum()
code
18152915/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

def map_atom_info(df, atom_idx):
    df = pd.merge(df, structures, how='left',
                  left_on=['molecule_name', f'atom_index_{atom_idx}'],
                  right_on=['molecule_name', 'atom_index'])
    df = df.drop('atom_index', axis=1)
    df = df.rename(columns={'atom': f'atom_{atom_idx}', 'x': f'x_{atom_idx}',
                            'y': f'y_{atom_idx}', 'z': f'z_{atom_idx}'})
    return df

train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
train.isnull().sum()

train['molecule_name_unique'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['molecule_name'].nunique())
test['molecule_name_unique'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['molecule_name'].nunique())
train['molecule_name_type'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['type'].nunique())
test['molecule_name_type'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['type'].nunique())
train['molecule_dist_mean'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['dist'].mean())
test['molecule_dist_mean'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['dist'].mean())
train['molecule_dist_sum'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['dist'].sum())
test['molecule_dist_sum'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['dist'].sum())
train['molecule_dist_min'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['dist'].min())
test['molecule_dist_min'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['dist'].min())
train['molecule_atom_count'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['atom_1'].count())
test['molecule_atom_count'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['atom_1'].count())
train['molecule_atom_u'] = train['molecule_name'].map(train.groupby(train['molecule_name'])['atom_1'].nunique())
test['molecule_atom_u'] = test['molecule_name'].map(test.groupby(test['molecule_name'])['atom_1'].nunique())
train['type_unique'] = train['type'].map(train.groupby(train['type'])['type'].nunique())
test['type_unique'] = test['type'].map(test.groupby(test['type'])['type'].nunique())
train['type_dist_mean'] = train['type'].map(train.groupby(train['type'])['dist'].mean())
test['type_dist_mean'] = test['type'].map(test.groupby(test['type'])['dist'].mean())
train['type_dist_sum'] = train['type'].map(train.groupby(train['type'])['dist'].sum())
test['type_dist_sum'] = test['type'].map(test.groupby(test['type'])['dist'].sum())
train['type_dist_min'] = train['type'].map(train.groupby(train['type'])['dist'].min())
test['type_dist_min'] = test['type'].map(test.groupby(test['type'])['dist'].min())
train['type_atom_count'] = train['type'].map(train.groupby(train['type'])['atom_1'].count())
test['type_atom_count'] = test['type'].map(test.groupby(test['type'])['atom_1'].count())
train['type_atom_u'] = train['type'].map(train.groupby(train['type'])['atom_1'].nunique())
test['type_atom_u'] = test['type'].map(test.groupby(test['type'])['atom_1'].nunique())

object_data = train.dtypes[train.dtypes == 'object'].index
object_data
code
18152915/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')

def map_atom_info(df, atom_idx):
    df = pd.merge(df, structures, how='left',
                  left_on=['molecule_name', f'atom_index_{atom_idx}'],
                  right_on=['molecule_name', 'atom_index'])
    df = df.drop('atom_index', axis=1)
    df = df.rename(columns={'atom': f'atom_{atom_idx}', 'x': f'x_{atom_idx}',
                            'y': f'y_{atom_idx}', 'z': f'z_{atom_idx}'})
    return df

train = map_atom_info(train, 0)
train = map_atom_info(train, 1)
test = map_atom_info(test, 0)
test = map_atom_info(test, 1)
print(train['atom_index_0'].nunique())
print(train['atom_index_1'].nunique())
print(train['id'].nunique())
code
18152915/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('../input/structures.csv')
train.head()
code
105179548/cell_23
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)

y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)

model = Sequential()
model.add(Dense(units=128, input_shape=(784,), activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x=x_train, y=y_train, batch_size=512, epochs=10)
test_loss, test_acc = model.evaluate(x_test, y_test)
y_pred = model.predict(x_test)
y_pred_classes = np.argmax(y_pred, axis=1)
print(y_pred)
print(y_pred_classes)
code
105179548/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import tensorflow as tf

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)

y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)

model = Sequential()
model.add(Dense(units=128, input_shape=(784,), activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x=x_train, y=y_train, batch_size=512, epochs=10)
code
105179548/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)

y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)

model = Sequential()
model.add(Dense(units=128, input_shape=(784,), activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x=x_train, y=y_train, batch_size=512, epochs=10)
test_loss, test_acc = model.evaluate(x_test, y_test)
y_pred = model.predict(x_test)
y_pred_classes = np.argmax(y_pred, axis=1)

random_idx = np.random.choice(len(x_test))
x_sample = x_test[random_idx]
y_true = np.argmax(y_test, axis=1)
y_sample_true = y_true[random_idx]
y_sample_pred_class = y_pred_classes[random_idx]

confusion_mtx = confusion_matrix(y_true, y_pred_classes)
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(confusion_mtx, annot=True, fmt='d', ax=ax, cmap='Blues')
ax.set_xlabel('Predicted Label')
ax.set_ylabel('True Label')
ax.set_title('Confusion Matrix')
code
105179548/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
code
105179548/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential

model = Sequential()
model.add(Dense(units=128, input_shape=(784,), activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
code
105179548/cell_8
[ "image_output_1.png" ]
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
code
105179548/cell_16
[ "text_plain_output_1.png" ]
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)
print(x_train.shape)
code
105179548/cell_24
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)

y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)

model = Sequential()
model.add(Dense(units=128, input_shape=(784,), activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x=x_train, y=y_train, batch_size=512, epochs=10)
test_loss, test_acc = model.evaluate(x_test, y_test)
y_pred = model.predict(x_test)
y_pred_classes = np.argmax(y_pred, axis=1)

random_idx = np.random.choice(len(x_test))
x_sample = x_test[random_idx]
y_true = np.argmax(y_test, axis=1)
y_sample_true = y_true[random_idx]
y_sample_pred_class = y_pred_classes[random_idx]
plt.title(f'Predicted: {y_sample_pred_class}, \nTrue: {y_sample_true}', fontsize=16)
plt.imshow(x_sample.reshape(28, 28), cmap='gray')
code
105179548/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import tensorflow as tf

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)

y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
for i in range(10):
    print(y_train[i])
code
105179548/cell_22
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import tensorflow as tf

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)

y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)

model = Sequential()
model.add(Dense(units=128, input_shape=(784,), activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x=x_train, y=y_train, batch_size=512, epochs=10)
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Loss: {test_loss}, \nTest Accuracy: {test_acc}')
code
105179548/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

num_classes = 10
f, ax = plt.subplots(1, num_classes, figsize=(20, 20))
for i in range(0, num_classes):
    sample = x_train[y_train == i][0]
    ax[i].imshow(sample, cmap='gray')
    ax[i].set_title(f'Label: {i}', fontsize=16)
code
105179548/cell_12
[ "text_plain_output_1.png" ]
for i in range(10):
    print(y_train[i])
code
2002001/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(10, 5))
sns.countplot(data=data, x='year')
code
2002001/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

a = data[['StartupName', 'IndustryVertical']].groupby('IndustryVertical').count().sort_values('StartupName', ascending=False).head(8)
a.reset_index(inplace=True)
plt.pie(a['StartupName'], labels=a['IndustryVertical'])
plt.show()
code
2002001/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
b = data.SubVertical.value_counts().sort_values(ascending=False).head(10)
location = data.CityLocation.value_counts().head(5)
location.plot(kind='barh', figsize=(10, 5))
InvestorsName = data.InvestorsName.value_counts().head(10)
InvestorsName.plot(kind='barh', figsize=(15, 10))
InvestmentType = data.InvestmentType.value_counts().head(5)
InvestmentType.plot(kind='barh', figsize=(15, 5))
AmountInUSD = data.AmountInUSD.value_counts().head(5)
AmountInUSD.plot(kind='barh', figsize=(15, 5))
amount = data[['StartupName', 'AmountInUSD']].groupby('AmountInUSD').count().sort_values('StartupName', ascending=False).head(10)
amount
code
2002001/cell_8
[ "text_html_output_1.png" ]
b = data.SubVertical.value_counts().sort_values(ascending=False).head(10)
b.plot(kind='barh', figsize=(15, 10))
code
2002001/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
b = data.SubVertical.value_counts().sort_values(ascending=False).head(10)
location = data.CityLocation.value_counts().head(5)
location.plot(kind='barh', figsize=(10, 5))
InvestorsName = data.InvestorsName.value_counts().head(10)
InvestorsName.plot(kind='barh', figsize=(15, 10))
InvestmentType = data.InvestmentType.value_counts().head(5)
InvestmentType.plot(kind='barh', figsize=(15, 5))
AmountInUSD = data.AmountInUSD.value_counts().head(5)
AmountInUSD.plot(kind='barh', figsize=(15, 5))
code
2002001/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
b = data.SubVertical.value_counts().sort_values(ascending=False).head(10)
location = data.CityLocation.value_counts().head(5)
location.plot(kind='barh', figsize=(10, 5))
InvestorsName = data.InvestorsName.value_counts().head(10)
InvestorsName.plot(kind='barh', figsize=(15, 10))
InvestmentType = data.InvestmentType.value_counts().head(5)
InvestmentType.plot(kind='barh', figsize=(15, 5))
code
2002001/cell_10
[ "text_plain_output_1.png", "image_output_1.png" ]
b = data.SubVertical.value_counts().sort_values(ascending=False).head(10)
location = data.CityLocation.value_counts().head(5)
location.plot(kind='barh', figsize=(10, 5))
code
2002001/cell_12
[ "image_output_1.png" ]
b = data.SubVertical.value_counts().sort_values(ascending=False).head(10)
location = data.CityLocation.value_counts().head(5)
location.plot(kind='barh', figsize=(10, 5))
InvestorsName = data.InvestorsName.value_counts().head(10)
InvestorsName.plot(kind='barh', figsize=(15, 10))
code
105179600/cell_4
[ "text_plain_output_1.png" ]
a = open('../input/poetry/Kanye_West.txt')
a.read()
code
105179600/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
a = open('../input/poetry/Kanye_West.txt')
a.read()
a.close()
# The file handle is closed at this point, so this read raises ValueError
# (consistent with the stderr screenshot recorded for this cell).
print(a.read())
code
333675/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

print(check_output(['ls', '../input']).decode('utf8'))
events = pd.read_csv('../input/events.csv', parse_dates=['timestamp'])
test = pd.read_csv('../input/gender_age_test.csv')
train = pd.read_csv('../input/gender_age_train.csv')
code
333675/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

events = pd.read_csv('../input/events.csv', parse_dates=['timestamp'])
test = pd.read_csv('../input/gender_age_test.csv')
train = pd.read_csv('../input/gender_age_train.csv')

events['timestamp'].loc[(events.longitude != 0.0) & (events.latitude != 0.0)] += events['longitude'].apply(lambda x: pd.Timedelta(seconds=240 * (x - 116.407)))
events['hourly'] = events.timestamp.dt.hour
events['hourly'].loc[(events.longitude != 0.0) & (events.latitude != 0.0)] = np.nan
hourly = events.groupby('device_id')['hourly'].apply(lambda x: ' '.join(str(s) for s in x))
train['hourly'] = 'Hourly:' + train['device_id'].map(hourly).astype(str)
test['hourly'] = 'Hourly:' + test['device_id'].map(hourly).astype(str)
print(train.loc[['device_id', 'hourly']])
code
130026736/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()
code
130026736/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
top_5_popular_movies.head(5)
code
130026736/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
top_10_selling_movie = df[['title', 'revenue']]
top_10_selling_movie = top_10_selling_movie.sort_values(['revenue'], ascending=False)
top_10_selling_movie = top_10_selling_movie.head(10)
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()

df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
Top_10_anime_members = df_2[['name', 'members']]
Top_10_anime_members = Top_10_anime_members.sort_values(['members'], ascending=False)
Top_10_anime_members = Top_10_anime_members.head(10)

plt.figure()
sns.barplot(x='members', y='name', data=Top_10_anime_members, palette='viridis')
plt.title('top 10 anime members')
plt.xlabel('members')
plt.ylabel('name')
plt.show()

top_10_anime_rating = df_2[['name', 'rating']]
top_10_anime_rating = top_10_anime_rating.sort_values(['rating'], ascending=False)

plt.figure(figsize=(40, 10))
sns.barplot(x='rating', y='name', data=top_10_anime_rating)
plt.xlabel('Rating')
plt.ylabel('Anime Name')
plt.title('Top 10 Anime Ratings')
plt.show()
code
130026736/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
top_10_selling_movie = df[['title', 'revenue']]
top_10_selling_movie = top_10_selling_movie.sort_values(['revenue'], ascending=False)
top_10_selling_movie = top_10_selling_movie.head(10)
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()

df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
Top_10_anime_members = df_2[['name', 'members']]
Top_10_anime_members = Top_10_anime_members.sort_values(['members'], ascending=False)
Top_10_anime_members = Top_10_anime_members.head(10)

plt.figure()
sns.barplot(x='members', y='name', data=Top_10_anime_members, palette='viridis')
plt.title('top 10 anime members')
plt.xlabel('members')
plt.ylabel('name')
plt.show()
code
130026736/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
code
130026736/cell_26
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
top_10_selling_movie = df[['title', 'revenue']]
top_10_selling_movie = top_10_selling_movie.sort_values(['revenue'], ascending=False)
top_10_selling_movie = top_10_selling_movie.head(10)
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()

df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
Top_10_anime_members = df_2[['name', 'members']]
Top_10_anime_members = Top_10_anime_members.sort_values(['members'], ascending=False)
Top_10_anime_members = Top_10_anime_members.head(10)

plt.figure()
sns.barplot(x='members', y='name', data=Top_10_anime_members, palette='viridis')
plt.title('top 10 anime members')
plt.xlabel('members')
plt.ylabel('name')
plt.show()

top_10_anime_rating = df_2[['name', 'rating']]
top_10_anime_rating = top_10_anime_rating.sort_values(['rating'], ascending=False)
df2_20 = df_2.head(20)

plt.figure(figsize=(20, 10))
sns.lineplot(x='members', y='rating', data=df2_20, color='red')
# plt.title('the relation between episodes and name')
plt.xlabel('members')
plt.ylabel('rating')

top_5_anime_episodes = df_2[['name', 'episodes']]
top_5_anime_episodes.head(10)
code
130026736/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
code
130026736/cell_19
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
Top_10_anime_members = df_2[['name', 'members']]
Top_10_anime_members = Top_10_anime_members.sort_values(['members'], ascending=False)
Top_10_anime_members.head(10)
code
130026736/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130026736/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
top_10_selling_movie = df[['title', 'revenue']]
top_10_selling_movie = top_10_selling_movie.sort_values(['revenue'], ascending=False)
top_10_selling_movie = top_10_selling_movie.head(10)
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()

plt.figure(figsize=(20, 10))
sns.barplot(x='revenue', y='title', data=top_10_selling_movie)
code
130026736/cell_17
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
code
130026736/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
top_10_selling_movie = df[['title', 'revenue']]
top_10_selling_movie = top_10_selling_movie.sort_values(['revenue'], ascending=False)
top_10_selling_movie = top_10_selling_movie.head(10)
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()

df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
Top_10_anime_members = df_2[['name', 'members']]
Top_10_anime_members = Top_10_anime_members.sort_values(['members'], ascending=False)
Top_10_anime_members = Top_10_anime_members.head(10)

plt.figure()
sns.barplot(x='members', y='name', data=Top_10_anime_members, palette='viridis')
plt.title('top 10 anime members')
plt.xlabel('members')
plt.ylabel('name')
plt.show()

top_10_anime_rating = df_2[['name', 'rating']]
top_10_anime_rating = top_10_anime_rating.sort_values(['rating'], ascending=False)
df2_20 = df_2.head(20)

plt.figure(figsize=(20, 10))
sns.lineplot(x='members', y='rating', data=df2_20, color='red')
plt.xlabel('members')
plt.ylabel('rating')
code
130026736/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
top_5_popular_movies = df[['title', 'popularity']]
most_popularity_lng = df.groupby('original_language')['popularity'].mean()
most_popularity_lng
top_5_popular_movies = top_5_popular_movies.head(5)

plt.figure()
sns.barplot(x='popularity', y='title', data=top_5_popular_movies, palette='viridis')
plt.title('top 5 popular movie')
plt.xlabel('popularity')
plt.ylabel('title')
plt.show()

plt.figure(figsize=(20, 30))
sns.barplot(x=most_popularity_lng.values, y=most_popularity_lng.index, data=top_5_popular_movies)
plt.title('The Average of popularity for each language')
plt.xlabel('Avg')
plt.ylabel('Language')
plt.show()
code
130026736/cell_22
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/top-10000-popular-movies-tmdb-05-2023/popular_10000_movies_tmdb.csv')
df
df_2 = pd.read_csv('/kaggle/input/anime-recommendations-database/anime.csv')
df_2
top_10_anime_rating = df_2[['name', 'rating']]
top_10_anime_rating = top_10_anime_rating.sort_values(['rating'], ascending=False)
top_10_anime_rating.head(10)
code
130026736/cell_12
[ "text_html_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
code
34142220/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
sns.set_style('dark')
plt.figure(figsize=(10, 10))
sns.lineplot(d['OverallCond'], d['SalePrice'])
code
34142220/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
sns.set_style('dark')
plt.figure(figsize=(19, 13))
sns.barplot(x=d['MSSubClass'], y=d['SalePrice'])
plt.title('saleprice for different building classes')
code
34142220/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
d.info()
code
34142220/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
sns.set_style('dark')
plt.figure(figsize=(10, 15))
sns.barplot(x=d['SaleType'], y=d['SalePrice'])
code
34142220/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
plt.figure(figsize=(15, 10))
sns.set_style('dark')
sns.lineplot(x=d['YearBuilt'], y=d['SalePrice'])
plt.title('saleprice difference on each year')
code
34142220/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
d.describe()
code
34142220/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
sns.set_style('dark')
plt.figure(figsize=(15, 10))
sns.lineplot(x=d['YrSold'], y=d['SalePrice'])
plt.title('saleprice vs year it sold')
code
34142220/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
sns.set_style('dark')
plt.figure(figsize=(20, 10))
sns.barplot(d['MSZoning'], d['SalePrice'])
code
34142220/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

d = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
te = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
d.head()
code
105201240/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
data = data.drop(['Floor', 'Area Type', 'Area Locality'], axis=1)
data
data['Posted_On'] = data['Posted_On'].astype(int)
code
105201240/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
code
105201240/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.info()
code
105201240/cell_23
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data['Posted_On'] = pd.to_datetime(data['Posted_On'])
data['Posted_On'].dtypes
data = data.drop('Posted On', axis=True)
data
data.dtypes
data = data.drop(['Floor', 'Area Type', 'Area Locality'], axis=1)
data

lb = preprocessing.LabelEncoder()
data = data.apply(lb.fit_transform)
data = pd.get_dummies(data, columns=['City'], drop_first=True)
data.head()
code
105201240/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
data = data.drop(['Floor', 'Area Type', 'Area Locality'], axis=1)
data
data['City'].unique()
code
105201240/cell_6
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
plt.figure(figsize=(10, 6))
sns.heatmap(data.corr(), annot=True)
code
105201240/cell_29
[ "text_html_output_1.png" ]
import lazypredict
from lazypredict.Supervised import LazyRegressor

lazy_model = LazyRegressor()
model, predict = lazy_model.fit(X_train, X_test, y_train, y_test)
model
code
105201240/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data['Posted_On'].dt.day_name()
code
105201240/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
data = data.drop(['Floor', 'Area Type', 'Area Locality'], axis=1)
data
code
105201240/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data
code
105201240/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
data
code
105201240/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
code
105201240/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
data['Floor']
code
105201240/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
data
code
105201240/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
code
105201240/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes

data['House Floor'] = 0
list0 = data['Floor'].str.split(pat=' ', n=2, expand=True).iloc[:, 0]
for i, v in enumerate(data['Floor']):
    if list0.iloc[i] == 'Ground':
        data['House Floor'][i] = 0
    elif list0.iloc[i] == 'Upper':
        data['House Floor'][i] = -1
    elif list0.iloc[i] == 'Lower':
        data['House Floor'][i] = -2
    else:
        data['House Floor'][i] = list0[i]
code
105201240/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data = data.drop('Posted On', axis=True)
data
data.dtypes
code
105201240/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data['Posted_On'] = pd.to_datetime(data['Posted_On'])
data['Posted_On'].dtypes
code
105201240/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data['Posted_On'] = pd.to_datetime(data['Posted_On'])
data['Posted_On'].dtypes
data = data.drop('Posted On', axis=True)
data
data.dtypes
data = data.drop(['Floor', 'Area Type', 'Area Locality'], axis=1)
data

lb = preprocessing.LabelEncoder()
data = data.apply(lb.fit_transform)
data = pd.get_dummies(data, columns=['City'], drop_first=True)
x = data.drop('Rent', axis=1)
y = data['Rent']
scaler = preprocessing.StandardScaler()
x = scaler.fit_transform(x)
x = pd.DataFrame(x)
x
code
105201240/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.isnull().sum()
data
code
105201240/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
data
data.describe()
code
130005860/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
code
130005860/cell_25
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

reg = RandomForestRegressor(random_state=1)
reg.fit(X_train, y_train)
pred = reg.predict(X_val)
mae = mean_absolute_error(y_val, pred)
mae
code
130005860/cell_23
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor

reg = RandomForestRegressor(random_state=1)
reg.fit(X_train, y_train)
code
130005860/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.head()
code
130005860/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df.head(10)
code
130005860/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
test_df.head()
code
130005860/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df.describe()
code
130005860/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
train_df.info()
code
130005860/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
code
130005860/cell_27
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
reg = RandomForestRegressor(random_state=1)
reg.fit(X_train, y_train)
pred = reg.predict(X_val)
prediction = reg.predict(test_df)
submission = pd.DataFrame({'id': test_df.id, 'MedHouseVal': prediction})
submission.head()
code
130005860/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
train_df.shape
train_df.columns
train_df.isnull().sum()
code
88087414/cell_9
[ "image_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
print('shape train dataframe:', data_train.shape)
print('shape test dataframe:', data_test.shape)
code
88087414/cell_34
[ "image_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
import pandas as pd
import re
import seaborn as sns

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
s = data_train.target.value_counts()

def clean_text(data):
    data['clean_text'] = data['text'].str.lower()
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem))
    return data

data_train = clean_text(data_train)
data_test = clean_text(data_test)

max_fatures = 5000
tokenizer = Tokenizer(num_words=max_fatures, split=' ')
tokenizer.fit_on_texts(data_train['clean_text'].values)
X = tokenizer.texts_to_sequences(data_train['clean_text'].values)
X = pad_sequences(X, maxlen=31, padding='post')
Y = data_train['target'].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0)
print(X_train[20])
print(' '.join([tokenizer.index_word[i] for i in X_train[20] if i != 0]))
code
88087414/cell_33
[ "image_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
import pandas as pd
import re
import seaborn as sns

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
s = data_train.target.value_counts()

def clean_text(data):
    data['clean_text'] = data['text'].str.lower()
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem))
    return data

data_train = clean_text(data_train)
data_test = clean_text(data_test)

max_fatures = 5000
tokenizer = Tokenizer(num_words=max_fatures, split=' ')
tokenizer.fit_on_texts(data_train['clean_text'].values)
X = tokenizer.texts_to_sequences(data_train['clean_text'].values)
X = pad_sequences(X, maxlen=31, padding='post')
Y = data_train['target'].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
code
88087414/cell_44
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import LSTM, Embedding, Conv1D, Dense, Flatten, MaxPooling1D, Dropout, Bidirectional, GlobalMaxPooling1D
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import plot_model
import keras
import pandas as pd
import re
import seaborn as sns

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
s = data_train.target.value_counts()

def clean_text(data):
    data['clean_text'] = data['text'].str.lower()
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem))
    data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem))
    return data

data_train = clean_text(data_train)
data_test = clean_text(data_test)

max_fatures = 5000
tokenizer = Tokenizer(num_words=max_fatures, split=' ')
tokenizer.fit_on_texts(data_train['clean_text'].values)
X = tokenizer.texts_to_sequences(data_train['clean_text'].values)
X = pad_sequences(X, maxlen=31, padding='post')
Y = data_train['target'].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0)
len(tokenizer.index_word)

embed_dim = 50
vocab_size = len(tokenizer.index_word) + 1
model1 = Sequential()
model1.add(Embedding(input_dim=vocab_size, input_length=31, output_dim=embed_dim))
model1.add(LSTM(30))
model1.add(Dropout(0.2))
model1.add(Flatten())
model1.add(Dense(1, activation='sigmoid'))
model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

keras.backend.clear_session()
batch_size = 32
history1 = model1.fit(X_train, Y_train, epochs=10, batch_size=batch_size, validation_data=(X_test, Y_test), verbose=1)
scores = model1.evaluate(X_test, Y_test, verbose=0)
predict = model1.predict(X_test)
predict1 = [1 if i > 0.5 else 0 for i in predict]
print(classification_report(Y_test, predict1))
code
88087414/cell_6
[ "image_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
len(data_test)
code