Columns in this dump (one field per line for each record below):
- path: string (length 13 to 17), formatted as "<notebook_id>/<cell_id>"
- screenshot_names: sequence of strings (length 1 to 873), the cell's output image names
- code: string (length 0 to 40.4k), the cell's source
- cell_type: string (1 distinct value: "code")
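A minimal sketch of walking records with this schema, assuming the dump has already been parsed into a list of dicts keyed by the four columns above; the parsing step, the records variable, and the sample entry are illustrative assumptions, not part of the dataset:

# Sketch only: count cells per notebook from (path, screenshot_names, code, cell_type) records.
from collections import defaultdict

# Hypothetical parsed form of this dump; a real loader is not specified here.
records = [
    {
        "path": "34135429/cell_3",                       # "<notebook_id>/<cell_id>"
        "screenshot_names": ["text_html_output_1.png"],  # output image names
        "code": "import pandas as pd\n...",              # cell source (may be empty)
        "cell_type": "code",                             # single class in this dump
    },
]

cells_per_notebook = defaultdict(int)
for rec in records:
    notebook_id = rec["path"].split("/", 1)[0]
    cells_per_notebook[notebook_id] += 1

for notebook_id, n_cells in sorted(cells_per_notebook.items()):
    print(notebook_id, n_cells)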
34135429/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.offline as py
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
color = sns.color_palette()
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
from plotly import tools
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
nifty_50_df = pd.read_csv('../input/nifty-indices-dataset/NIFTY 50.csv', index_col='Date', parse_dates=['Date'])
nifty_50_df.isnull().sum()
nifty_50_df = nifty_50_df.fillna(method='ffill')

def plot_attribute(df, attritube, start='2000', end='2020', color='blue'):
    fig, ax = plt.subplots(1, figsize=(20, 5))
    ax.plot(df[start:end].index, df[start:end][attritube], 'tab:{}'.format(color))
    ax.set_title("Nifty 50 stock {} from 2000 to 2020".format(attritube))
    ax.axhline(y=df[start:end].describe()[attritube]["max"], linewidth=2, color='m')
    ax.axhline(y=df[start:end].describe()[attritube]["min"], linewidth=2, color='c')
    ax.axvline(x=df[attritube].idxmax(), linewidth=2, color='b')
    ax.axvline(x=df[attritube].idxmin(), linewidth=2, color='y')
    ax.text(x=df[attritube].idxmax(), y=df[start:end].describe()[attritube]["max"], s='MAX', horizontalalignment='right', verticalalignment='bottom', color='blue', fontsize=20)
    ax.text(x=df[attritube].idxmin(), y=df[start:end].describe()[attritube]["min"], s='MIN', horizontalalignment='left', verticalalignment='top', color='red', fontsize=20)
    plt.show()
    print("Max Value : ", df[start:end].describe()[attritube]["max"])
    print("Min Value : ", df[start:end].describe()[attritube]["min"])

plot_attribute(nifty_50_df, 'Low', color='orange')
code
34135429/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.offline as py
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
color = sns.color_palette()
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
from plotly import tools
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
nifty_50_df = pd.read_csv('../input/nifty-indices-dataset/NIFTY 50.csv', index_col='Date', parse_dates=['Date'])
nifty_50_df.isnull().sum()
nifty_50_df = nifty_50_df.fillna(method='ffill')

def plot_attribute(df, attritube, start='2000', end='2020', color='blue'):
    fig, ax = plt.subplots(1, figsize=(20, 5))
    ax.plot(df[start:end].index, df[start:end][attritube], 'tab:{}'.format(color))
    ax.set_title("Nifty 50 stock {} from 2000 to 2020".format(attritube))
    ax.axhline(y=df[start:end].describe()[attritube]["max"], linewidth=2, color='m')
    ax.axhline(y=df[start:end].describe()[attritube]["min"], linewidth=2, color='c')
    ax.axvline(x=df[attritube].idxmax(), linewidth=2, color='b')
    ax.axvline(x=df[attritube].idxmin(), linewidth=2, color='y')
    ax.text(x=df[attritube].idxmax(), y=df[start:end].describe()[attritube]["max"], s='MAX', horizontalalignment='right', verticalalignment='bottom', color='blue', fontsize=20)
    ax.text(x=df[attritube].idxmin(), y=df[start:end].describe()[attritube]["min"], s='MIN', horizontalalignment='left', verticalalignment='top', color='red', fontsize=20)
    plt.show()
    print("Max Value : ", df[start:end].describe()[attritube]["max"])
    print("Min Value : ", df[start:end].describe()[attritube]["min"])

plot_attribute(nifty_50_df, 'Volume', color='blue')
code
34135429/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
nifty_50_df = pd.read_csv('../input/nifty-indices-dataset/NIFTY 50.csv', index_col='Date', parse_dates=['Date'])
nifty_50_df.head(5)
code
34135429/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.offline as py
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
color = sns.color_palette()
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
from plotly import tools
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
nifty_50_df = pd.read_csv('../input/nifty-indices-dataset/NIFTY 50.csv', index_col='Date', parse_dates=['Date'])
nifty_50_df.isnull().sum()
nifty_50_df = nifty_50_df.fillna(method='ffill')

def plot_attribute(df, attritube, start='2000', end='2020', color='blue'):
    fig, ax = plt.subplots(1, figsize=(20, 5))
    ax.plot(df[start:end].index, df[start:end][attritube], 'tab:{}'.format(color))
    ax.set_title("Nifty 50 stock {} from 2000 to 2020".format(attritube))
    ax.axhline(y=df[start:end].describe()[attritube]["max"], linewidth=2, color='m')
    ax.axhline(y=df[start:end].describe()[attritube]["min"], linewidth=2, color='c')
    ax.axvline(x=df[attritube].idxmax(), linewidth=2, color='b')
    ax.axvline(x=df[attritube].idxmin(), linewidth=2, color='y')
    ax.text(x=df[attritube].idxmax(), y=df[start:end].describe()[attritube]["max"], s='MAX', horizontalalignment='right', verticalalignment='bottom', color='blue', fontsize=20)
    ax.text(x=df[attritube].idxmin(), y=df[start:end].describe()[attritube]["min"], s='MIN', horizontalalignment='left', verticalalignment='top', color='red', fontsize=20)
    plt.show()
    print("Max Value : ", df[start:end].describe()[attritube]["max"])
    print("Min Value : ", df[start:end].describe()[attritube]["min"])

plot_attribute(nifty_50_df, 'Turnover', color='red')
code
34135429/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.offline as py
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
color = sns.color_palette()
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
from plotly import tools
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
nifty_50_df = pd.read_csv('../input/nifty-indices-dataset/NIFTY 50.csv', index_col='Date', parse_dates=['Date'])
nifty_50_df.isnull().sum()
nifty_50_df = nifty_50_df.fillna(method='ffill')

def plot_attribute(df, attritube, start='2000', end='2020', color='blue'):
    fig, ax = plt.subplots(1, figsize=(20, 5))
    ax.plot(df[start:end].index, df[start:end][attritube], 'tab:{}'.format(color))
    ax.set_title("Nifty 50 stock {} from 2000 to 2020".format(attritube))
    ax.axhline(y=df[start:end].describe()[attritube]["max"], linewidth=2, color='m')
    ax.axhline(y=df[start:end].describe()[attritube]["min"], linewidth=2, color='c')
    ax.axvline(x=df[attritube].idxmax(), linewidth=2, color='b')
    ax.axvline(x=df[attritube].idxmin(), linewidth=2, color='y')
    ax.text(x=df[attritube].idxmax(), y=df[start:end].describe()[attritube]["max"], s='MAX', horizontalalignment='right', verticalalignment='bottom', color='blue', fontsize=20)
    ax.text(x=df[attritube].idxmin(), y=df[start:end].describe()[attritube]["min"], s='MIN', horizontalalignment='left', verticalalignment='top', color='red', fontsize=20)
    plt.show()
    print("Max Value : ", df[start:end].describe()[attritube]["max"])
    print("Min Value : ", df[start:end].describe()[attritube]["min"])

plot_attribute(nifty_50_df, 'High', color='green')
code
17109357/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/Iris.csv')
data.head()
code
17109357/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/Iris.csv')
data.columns
data.Species.unique()
code
17109357/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
print(os.listdir('../input'))
code
17109357/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # drawing library
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/Iris.csv')
data.columns
data.Species.unique()
setosa = data[data.Species == 'Iris-setosa']
versicolor = data[data.Species == 'Iris-versicolor']
virginica = data[data.Species == 'Iris-virginica']
plt.plot(setosa.Id, setosa.PetalLengthCm, color='red', label='setosa')
plt.plot(versicolor.Id, versicolor.PetalLengthCm, color='green', label='versicolor')
plt.plot(virginica.Id, virginica.PetalLengthCm, color='blue', label='virginica')
plt.xlabel('Id')
plt.ylabel('PetalLengthCm')
plt.legend()
plt.show()
code
17109357/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/Iris.csv')
data.info()
code
17109357/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/Iris.csv')
data.columns
code
130010265/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender_counts = df['gender'].value_counts()
diabetes_counts = df['diabetes'].value_counts()
correlation_matrix = df.corr(numeric_only=True)
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm')
plt.title('Correlation Heatmap')
plt.show()
code
130010265/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
plt.boxplot(df['bmi'])
plt.xlabel('BMI')
plt.title('Box Plot of BMI')
plt.show()
code
130010265/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.describe()
code
130010265/cell_2
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report, confusion_matrix
code
130010265/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender_counts = df['gender'].value_counts()
plt.scatter(df['blood_glucose_level'], df['HbA1c_level'])
plt.xlabel('Blood Glucose Level')
plt.ylabel('HbA1c Level')
plt.title('Scatter Plot of Blood Glucose Level vs HbA1c Level')
plt.show()
code
130010265/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.info()
code
130010265/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
plt.hist(df['age'], bins=10, edgecolor='k')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('Histogram of Age')
plt.show()
code
130010265/cell_15
[ "image_output_1.png" ]
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender_counts = df['gender'].value_counts()
diabetes_counts = df['diabetes'].value_counts()
correlation_matrix = df.corr(numeric_only=True)
df_diabet = df[df['diabetes'] == 1]
from pandas.plotting import scatter_matrix
scatter_matrix(df_diabet[['age', 'bmi', 'blood_glucose_level']], figsize=(8, 8))
plt.show()
code
130010265/cell_16
[ "image_output_1.png" ]
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender_counts = df['gender'].value_counts()
diabetes_counts = df['diabetes'].value_counts()
correlation_matrix = df.corr(numeric_only=True)
df_diabet = df[df['diabetes'] == 1]
from pandas.plotting import scatter_matrix
sns.violinplot(x=df['diabetes'], y=df['age'])
plt.xlabel('Diabetes')
plt.ylabel('Age')
plt.title('Violin Plot of Diabetes and Age')
plt.show()
code
130010265/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender_counts = df['gender'].value_counts()
plt.bar(gender_counts.index, gender_counts.values)
plt.xlabel('Gender')
plt.ylabel('Count')
plt.title('Bar Plot of Gender')
plt.show()
code
130010265/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender_counts = df['gender'].value_counts()
diabetes_counts = df['diabetes'].value_counts()
plt.pie(diabetes_counts.values, labels=diabetes_counts.index, autopct='%1.1f%%')
plt.title('Pie Chart of Diabetes')
plt.show()
code
130010265/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.head()
code
16147703/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if (train.at[i, 'ZONE_CODE'] == 'ZONE01'):
        zone_1 = zone_1.append(train.iloc[i])
    elif (train.at[i, 'ZONE_CODE'] == 'ZONE02'):
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label="zone 1")
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label="zone 2")
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label="zone 3")
ax.legend()
plt.show()
train = train.loc[train.UPDATE_TIME >= '2019-03-01']
min_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).min().reset_index()
max_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).max().reset_index()
groupby_mean = train.groupby(['ZONE_CODE', 'HOUR_ID']).mean().reset_index()
groupby_mean['smape_maxuser'] = (max_maxuser['MAX_USER'] - min_maxuser['MAX_USER']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_maxuser'] = min_maxuser['MAX_USER']
groupby_mean['smape_bandwidth'] = (max_maxuser['BANDWIDTH_TOTAL'] - min_maxuser['BANDWIDTH_TOTAL']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_bandwidth'] = min_maxuser['BANDWIDTH_TOTAL']
df_val = train.drop(['BANDWIDTH_TOTAL', 'MAX_USER'], axis=1).join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_test = test.join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_val.head()
code
16147703/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
train.tail()
code
16147703/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
print(train.shape)
print(test.shape)
code
16147703/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if (train.at[i, 'ZONE_CODE'] == 'ZONE01'):
        zone_1 = zone_1.append(train.iloc[i])
    elif (train.at[i, 'ZONE_CODE'] == 'ZONE02'):
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label="zone 1")
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label="zone 2")
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label="zone 3")
ax.legend()
plt.show()
train = train.loc[train.UPDATE_TIME >= '2019-03-01']
min_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).min().reset_index()
max_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).max().reset_index()
groupby_mean = train.groupby(['ZONE_CODE', 'HOUR_ID']).mean().reset_index()
groupby_mean['smape_maxuser'] = (max_maxuser['MAX_USER'] - min_maxuser['MAX_USER']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_maxuser'] = min_maxuser['MAX_USER']
groupby_mean['smape_bandwidth'] = (max_maxuser['BANDWIDTH_TOTAL'] - min_maxuser['BANDWIDTH_TOTAL']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_bandwidth'] = min_maxuser['BANDWIDTH_TOTAL']
df_val = train.drop(['BANDWIDTH_TOTAL', 'MAX_USER'], axis=1).join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_test = test.join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
THRESHOLD = 12
df_val.loc[(df_val['smape_bandwidth'] > THRESHOLD) & (df_val['min_of_bandwidth'] < 13), 'BANDWIDTH_TOTAL'] = np.nan
df_test.loc[(df_test['smape_bandwidth'] > THRESHOLD) & (df_test['min_of_bandwidth'] < 13), 'BANDWIDTH_TOTAL'] = np.nan
print(df_test['BANDWIDTH_TOTAL'].describe())
code
16147703/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import os
print(os.listdir('../input'))
code
16147703/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if (train.at[i, 'ZONE_CODE'] == 'ZONE01'):
        zone_1 = zone_1.append(train.iloc[i])
    elif (train.at[i, 'ZONE_CODE'] == 'ZONE02'):
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label="zone 1")
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label="zone 2")
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label="zone 3")
ax.legend()
plt.show()
train = train.loc[train.UPDATE_TIME >= '2019-03-01']
min_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).min().reset_index()
max_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).max().reset_index()
groupby_mean = train.groupby(['ZONE_CODE', 'HOUR_ID']).mean().reset_index()
groupby_mean['smape_maxuser'] = (max_maxuser['MAX_USER'] - min_maxuser['MAX_USER']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_maxuser'] = min_maxuser['MAX_USER']
groupby_mean['smape_bandwidth'] = (max_maxuser['BANDWIDTH_TOTAL'] - min_maxuser['BANDWIDTH_TOTAL']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_bandwidth'] = min_maxuser['BANDWIDTH_TOTAL']
print(groupby_mean.shape)
code
16147703/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if (train.at[i, 'ZONE_CODE'] == 'ZONE01'):
        zone_1 = zone_1.append(train.iloc[i])
    elif (train.at[i, 'ZONE_CODE'] == 'ZONE02'):
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label="zone 1")
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label="zone 2")
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label="zone 3")
ax.legend()
plt.show()
train = train.loc[train.UPDATE_TIME >= '2019-03-01']
min_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).min().reset_index()
max_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).max().reset_index()
groupby_mean = train.groupby(['ZONE_CODE', 'HOUR_ID']).mean().reset_index()
groupby_mean['smape_maxuser'] = (max_maxuser['MAX_USER'] - min_maxuser['MAX_USER']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_maxuser'] = min_maxuser['MAX_USER']
groupby_mean['smape_bandwidth'] = (max_maxuser['BANDWIDTH_TOTAL'] - min_maxuser['BANDWIDTH_TOTAL']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_bandwidth'] = min_maxuser['BANDWIDTH_TOTAL']
df_val = train.drop(['BANDWIDTH_TOTAL', 'MAX_USER'], axis=1).join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_test = test.join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
print(df_val.shape)
code
16147703/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
train.head()
code
16147703/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if (train.at[i, 'ZONE_CODE'] == 'ZONE01'):
        zone_1 = zone_1.append(train.iloc[i])
    elif (train.at[i, 'ZONE_CODE'] == 'ZONE02'):
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label="zone 1")
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label="zone 2")
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label="zone 3")
ax.legend()
plt.show()
train = train.loc[train.UPDATE_TIME >= '2019-03-01']
min_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).min().reset_index()
max_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).max().reset_index()
groupby_mean = train.groupby(['ZONE_CODE', 'HOUR_ID']).mean().reset_index()
groupby_mean['smape_maxuser'] = (max_maxuser['MAX_USER'] - min_maxuser['MAX_USER']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_maxuser'] = min_maxuser['MAX_USER']
groupby_mean['smape_bandwidth'] = (max_maxuser['BANDWIDTH_TOTAL'] - min_maxuser['BANDWIDTH_TOTAL']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_bandwidth'] = min_maxuser['BANDWIDTH_TOTAL']
df_val = train.drop(['BANDWIDTH_TOTAL', 'MAX_USER'], axis=1).join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_test = test.join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_val['smape_bandwidth'].describe()
code
16147703/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if (train.at[i, 'ZONE_CODE'] == 'ZONE01'):
        zone_1 = zone_1.append(train.iloc[i])
    elif (train.at[i, 'ZONE_CODE'] == 'ZONE02'):
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label="zone 1")
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label="zone 2")
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label="zone 3")
ax.legend()
plt.show()
train = train.loc[train.UPDATE_TIME >= '2019-03-01']
min_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).min().reset_index()
max_maxuser = train.groupby(['ZONE_CODE', 'HOUR_ID']).max().reset_index()
groupby_mean = train.groupby(['ZONE_CODE', 'HOUR_ID']).mean().reset_index()
groupby_mean['smape_maxuser'] = (max_maxuser['MAX_USER'] - min_maxuser['MAX_USER']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_maxuser'] = min_maxuser['MAX_USER']
groupby_mean['smape_bandwidth'] = (max_maxuser['BANDWIDTH_TOTAL'] - min_maxuser['BANDWIDTH_TOTAL']) / (max_maxuser['MAX_USER'] + min_maxuser['MAX_USER']) * 100
groupby_mean['min_of_bandwidth'] = min_maxuser['BANDWIDTH_TOTAL']
df_val = train.drop(['BANDWIDTH_TOTAL', 'MAX_USER'], axis=1).join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
df_test = test.join(groupby_mean.set_index(['ZONE_CODE', 'HOUR_ID']), on=['ZONE_CODE', 'HOUR_ID'])
THRESHOLD = 12
df_val.loc[(df_val['smape_bandwidth'] > THRESHOLD) & (df_val['min_of_bandwidth'] < 13), 'BANDWIDTH_TOTAL'] = np.nan
df_test.loc[(df_test['smape_bandwidth'] > THRESHOLD) & (df_test['min_of_bandwidth'] < 13), 'BANDWIDTH_TOTAL'] = np.nan
df_val.loc[(df_val['smape_maxuser'] > THRESHOLD) & (df_val['min_of_maxuser'] < 13), 'MAX_USER'] = np.nan
df_test.loc[(df_test['smape_maxuser'] > THRESHOLD) & (df_test['min_of_maxuser'] < 13), 'MAX_USER'] = np.nan
print(df_test['MAX_USER'].describe())
code
16147703/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test_id.csv')
train['UPDATE_TIME'] = train['UPDATE_TIME'].astype('datetime64[ns]')
test['UPDATE_TIME'] = test['UPDATE_TIME'].astype('datetime64[ns]')
zone_1 = pd.DataFrame()
zone_2 = pd.DataFrame()
zone_3 = pd.DataFrame()
for i in range(train.shape[0]):
    if train.at[i, 'ZONE_CODE'] == 'ZONE01':
        zone_1 = zone_1.append(train.iloc[i])
    elif train.at[i, 'ZONE_CODE'] == 'ZONE02':
        zone_2 = zone_2.append(train.iloc[i])
    else:
        zone_3 = zone_3.append(train.iloc[i])
for i in range(zone_1.shape[0]):
    zone_1.at[i, 'HOUR_ID'] = i
for i in range(zone_2.shape[0]):
    zone_2.at[i, 'HOUR_ID'] = i
for i in range(zone_3.shape[0]):
    zone_3.at[i, 'HOUR_ID'] = i
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
line_1 = ax.plot(zone_1['HOUR_ID'], zone_1['MAX_USER'], label='zone 1')
line_2 = ax.plot(zone_2['HOUR_ID'], zone_2['MAX_USER'], label='zone 2')
line_3 = ax.plot(zone_3['HOUR_ID'], zone_3['MAX_USER'], label='zone 3')
ax.legend()
plt.show()
code
122255317/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.show()
# create multiple subplots without overlapping
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()  # avoid overlapping
plt.show()
# define the index of subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].set(title='Upper Left [0,0]')
axes[0, 1].set(title='Upper Right [0,1]')
axes[1, 0].set(title='Lower Left [1,0]')
axes[1, 1].set(title='Lower Right [1,1]')
plt.tight_layout()
plt.show()
x = [1, 2, 3, 4, 5, 6, 7, 8]
y = [2, 3, 4, 5, 6, 7, 8, 9]
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
ax[0, 0].plot(x, y)
ax[0, 0].bar(x, y)
ax[0, 1].scatter(x, y)
ax[1, 0].bar(x, y)
ax[1, 1].barh(x, y)
plt.show()
code
122255317/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.show()
code
122255317/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.show()
# create multiple subplots without overlapping
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()  # avoid overlapping
plt.show()
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].set(title='Upper Left [0,0]')
axes[0, 1].set(title='Upper Right [0,1]')
axes[1, 0].set(title='Lower Left [1,0]')
axes[1, 1].set(title='Lower Right [1,1]')
plt.tight_layout()
plt.show()
code
122255317/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
code
122255317/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
fig, ax = plt.subplots(ncols=2)
code
122255317/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.show()
code
122255317/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.show()
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()
plt.show()
code
122255317/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
fig, ax = plt.subplots(nrows=2)
code
122255317/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
code
122255317/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10, 10))
plt.show()
# set axis with xlim and ylim, title, labels
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
plt.savefig('chart1.png')
plt.savefig('chart2.png', transparent=True)
code
333413/cell_13
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
import pandas as pd
import xgboost as xgb
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
dup = phone.groupby('device_id').size()
dup = dup[dup > 1]
dup.shape
dup = phone.loc[phone.device_id.isin(dup.index)]
first = dup.groupby('device_id').first()
last = dup.groupby('device_id').last()
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
phone['old_model'] = LabelEncoder().fit_transform(phone.device_model)
train = gatrain.merge(phone)
train['y'] = LabelEncoder().fit_transform(train['group'])
params = {'objective': 'multi:softprob', 'num_class': 12, 'booster': 'gbtree', 'max_depth': 8, 'eval_metric': 'mlogloss', 'eta': 0.02, 'silent': 1, 'alpha': 3}

def encode_cat(Xtrain, Xtest):
    model_age = Xtrain.groupby(['model'])['age'].agg('mean')
    brand_age = Xtrain.groupby(['brand'])['age'].agg('mean')
    Xtest['model_age'] = Xtest['model'].map(model_age)
    Xtrain['model_age'] = Xtrain['model'].map(model_age)
    Xtest['brand_age'] = Xtest['brand'].map(brand_age)
    Xtrain['brand_age'] = Xtrain['brand'].map(brand_age)
    return (Xtrain[['brand', 'model', 'old_model']], Xtest[['brand', 'model', 'old_model']])

y = train['y']
kf = KFold(train.shape[0], n_folds=5, shuffle=True, random_state=1024)
pred = np.zeros((train.shape[0], 12))
for itrain, itest in kf:
    Xtrain = train.ix[itrain,]
    Xtest = train.ix[itest,]
    ytrain, ytest = (y[itrain], y[itest])
    Xtrain, Xtest = encode_cat(Xtrain, Xtest)
    dtrain = xgb.DMatrix(Xtrain, label=ytrain)
    dvalid = xgb.DMatrix(Xtest, label=ytest)
    watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
    gbm = xgb.train(params, dtrain, 600, evals=watchlist, early_stopping_rounds=25, verbose_eval=20)
    temp_pred = gbm.predict(dvalid)
    pred[itest, :] = temp_pred
    print(log_loss(ytest, temp_pred))
code
333413/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
gatrain.head(3)
code
333413/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
import xgboost as xgb
code
333413/cell_11
[ "text_html_output_1.png" ]
params = {'objective': 'multi:softprob', 'num_class': 12, 'booster': 'gbtree', 'max_depth': 8, 'eval_metric': 'mlogloss', 'eta': 0.02, 'silent': 1, 'alpha': 3}
code
333413/cell_15
[ "text_html_output_1.png" ]
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
import pandas as pd
import xgboost as xgb
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
dup = phone.groupby('device_id').size()
dup = dup[dup > 1]
dup.shape
dup = phone.loc[phone.device_id.isin(dup.index)]
first = dup.groupby('device_id').first()
last = dup.groupby('device_id').last()
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
phone['old_model'] = LabelEncoder().fit_transform(phone.device_model)
train = gatrain.merge(phone)
train['y'] = LabelEncoder().fit_transform(train['group'])
params = {'objective': 'multi:softprob', 'num_class': 12, 'booster': 'gbtree', 'max_depth': 8, 'eval_metric': 'mlogloss', 'eta': 0.02, 'silent': 1, 'alpha': 3}

def encode_cat(Xtrain, Xtest):
    model_age = Xtrain.groupby(['model'])['age'].agg('mean')
    brand_age = Xtrain.groupby(['brand'])['age'].agg('mean')
    Xtest['model_age'] = Xtest['model'].map(model_age)
    Xtrain['model_age'] = Xtrain['model'].map(model_age)
    Xtest['brand_age'] = Xtest['brand'].map(brand_age)
    Xtrain['brand_age'] = Xtrain['brand'].map(brand_age)
    return (Xtrain[['brand', 'model', 'old_model']], Xtest[['brand', 'model', 'old_model']])

y = train['y']
kf = KFold(train.shape[0], n_folds=5, shuffle=True, random_state=1024)
pred = np.zeros((train.shape[0], 12))
for itrain, itest in kf:
    Xtrain = train.ix[itrain,]
    Xtest = train.ix[itest,]
    ytrain, ytest = (y[itrain], y[itest])
    Xtrain, Xtest = encode_cat(Xtrain, Xtest)
    dtrain = xgb.DMatrix(Xtrain, label=ytrain)
    dvalid = xgb.DMatrix(Xtest, label=ytest)
    watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
    gbm = xgb.train(params, dtrain, 600, evals=watchlist, early_stopping_rounds=25, verbose_eval=20)
    temp_pred = gbm.predict(dvalid)
    pred[itest, :] = temp_pred
pred.shape
code
333413/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone.head(3)
code
333413/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
import pandas as pd
import xgboost as xgb
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
dup = phone.groupby('device_id').size()
dup = dup[dup > 1]
dup.shape
dup = phone.loc[phone.device_id.isin(dup.index)]
first = dup.groupby('device_id').first()
last = dup.groupby('device_id').last()
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
phone['old_model'] = LabelEncoder().fit_transform(phone.device_model)
train = gatrain.merge(phone)
train['y'] = LabelEncoder().fit_transform(train['group'])
params = {'objective': 'multi:softprob', 'num_class': 12, 'booster': 'gbtree', 'max_depth': 8, 'eval_metric': 'mlogloss', 'eta': 0.02, 'silent': 1, 'alpha': 3}

def encode_cat(Xtrain, Xtest):
    model_age = Xtrain.groupby(['model'])['age'].agg('mean')
    brand_age = Xtrain.groupby(['brand'])['age'].agg('mean')
    Xtest['model_age'] = Xtest['model'].map(model_age)
    Xtrain['model_age'] = Xtrain['model'].map(model_age)
    Xtest['brand_age'] = Xtest['brand'].map(brand_age)
    Xtrain['brand_age'] = Xtrain['brand'].map(brand_age)
    return (Xtrain[['brand', 'model', 'old_model']], Xtest[['brand', 'model', 'old_model']])

y = train['y']
kf = KFold(train.shape[0], n_folds=5, shuffle=True, random_state=1024)
pred = np.zeros((train.shape[0], 12))
for itrain, itest in kf:
    Xtrain = train.ix[itrain,]
    Xtest = train.ix[itest,]
    ytrain, ytest = (y[itrain], y[itest])
    Xtrain, Xtest = encode_cat(Xtrain, Xtest)
    dtrain = xgb.DMatrix(Xtrain, label=ytrain)
    dvalid = xgb.DMatrix(Xtest, label=ytest)
    watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
    gbm = xgb.train(params, dtrain, 600, evals=watchlist, early_stopping_rounds=25, verbose_eval=20)
    temp_pred = gbm.predict(dvalid)
    pred[itest, :] = temp_pred
log_loss(train['y'].values.tolist(), pred)
code
333413/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
dup = phone.groupby('device_id').size()
dup = dup[dup > 1]
dup.shape
dup = phone.loc[phone.device_id.isin(dup.index)]
first = dup.groupby('device_id').first()
last = dup.groupby('device_id').last()
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
phone['old_model'] = LabelEncoder().fit_transform(phone.device_model)
train = gatrain.merge(phone)
train['y'] = LabelEncoder().fit_transform(train['group'])
train.head()
code
333413/cell_12
[ "text_html_output_1.png" ]
def encode_cat(Xtrain, Xtest):
    model_age = Xtrain.groupby(['model'])['age'].agg('mean')
    brand_age = Xtrain.groupby(['brand'])['age'].agg('mean')
    Xtest['model_age'] = Xtest['model'].map(model_age)
    Xtrain['model_age'] = Xtrain['model'].map(model_age)
    Xtest['brand_age'] = Xtest['brand'].map(brand_age)
    Xtrain['brand_age'] = Xtrain['brand'].map(brand_age)
    return (Xtrain[['brand', 'model', 'old_model']], Xtest[['brand', 'model', 'old_model']])
code
333413/cell_5
[ "text_plain_output_100.png", "text_plain_output_334.png", "text_plain_output_770.png", "text_plain_output_743.png", "text_plain_output_673.png", "text_plain_output_445.png", "text_plain_output_640.png", "text_plain_output_822.png", "text_plain_output_201.png", "text_plain_output_586.png", "text_plain_output_261.png", "text_plain_output_775.png", "text_plain_output_819.png", "text_plain_output_565.png", "text_plain_output_522.png", "text_plain_output_84.png", "text_plain_output_624.png", "text_plain_output_521.png", "text_plain_output_322.png", "text_plain_output_769.png", "text_plain_output_205.png", "text_plain_output_826.png", "text_plain_output_693.png", "text_plain_output_828.png", "text_plain_output_824.png", "text_plain_output_511.png", "text_plain_output_608.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_715.png", "text_plain_output_282.png", "text_plain_output_579.png", "text_plain_output_793.png", "text_plain_output_629.png", "text_plain_output_396.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "application_vnd.jupyter.stderr_output_768.png", "text_plain_output_139.png", "text_plain_output_362.png", "text_plain_output_813.png", "text_plain_output_35.png", "text_plain_output_697.png", "text_plain_output_501.png", "text_plain_output_593.png", "text_plain_output_258.png", "text_plain_output_685.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_598.png", "text_plain_output_490.png", "text_plain_output_790.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_367.png", "text_plain_output_750.png", "text_plain_output_262.png", "application_vnd.jupyter.stderr_output_9.png", "text_plain_output_278.png", "text_plain_output_588.png", "text_plain_output_395.png", "text_plain_output_617.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_570.png", "text_plain_output_799.png", "text_plain_output_674.png", "text_plain_output_833.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_671.png", "text_plain_output_718.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_756.png", "text_plain_output_678.png", "text_plain_output_688.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_614.png", "text_plain_output_420.png", "text_plain_output_514.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_576.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_670.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "text_plain_output_184.png", "text_plain_output_477.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_664.png", "text_plain_output_716.png", "text_plain_output_627.png", "text_plain_output_613.png", "text_plain_output_736.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", 
"text_plain_output_684.png", "text_plain_output_774.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_331.png", "text_plain_output_809.png", "text_plain_output_642.png", "text_plain_output_550.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_676.png", "text_plain_output_704.png", "text_plain_output_687.png", "text_plain_output_776.png", "text_plain_output_492.png", "text_plain_output_321.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_272.png", "text_plain_output_115.png", "text_plain_output_748.png", "text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_634.png", "text_plain_output_656.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "text_plain_output_771.png", "text_plain_output_651.png", "text_plain_output_437.png", "text_plain_output_198.png", "text_plain_output_699.png", "text_plain_output_387.png", "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_555.png", "text_plain_output_548.png", "text_plain_output_759.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", "text_plain_output_404.png", "text_plain_output_831.png", "text_plain_output_114.png", "text_plain_output_659.png", "text_plain_output_515.png", "text_plain_output_157.png", "text_plain_output_773.png", "text_plain_output_494.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_470.png", "text_plain_output_496.png", "text_plain_output_836.png", "text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_712.png", "text_plain_output_484.png", "text_plain_output_44.png", "text_plain_output_633.png", "text_plain_output_796.png", "text_plain_output_325.png", "text_plain_output_785.png", "text_plain_output_203.png", "text_plain_output_505.png", "text_plain_output_603.png", "text_plain_output_655.png", "text_plain_output_119.png", "text_plain_output_546.png", "text_plain_output_540.png", "text_plain_output_373.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_741.png", "text_plain_output_118.png", "text_plain_output_551.png", "text_plain_output_583.png", "text_plain_output_131.png", "text_plain_output_817.png", "text_plain_output_40.png", "text_plain_output_343.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_734.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_604.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_639.png", "text_plain_output_20.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_557.png", "text_plain_output_273.png", "text_plain_output_706.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_686.png", "text_plain_output_802.png", "text_plain_output_753.png", "text_plain_output_669.png", "text_plain_output_414.png", 
"text_plain_output_461.png", "text_plain_output_510.png", "text_plain_output_222.png", "text_plain_output_589.png", "text_plain_output_101.png", "text_plain_output_530.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_489.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_779.png", "application_vnd.jupyter.stderr_output_6.png", "text_plain_output_795.png", "text_plain_output_725.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_691.png", "text_plain_output_764.png", "text_plain_output_467.png", "text_plain_output_502.png", "text_plain_output_794.png", "text_plain_output_221.png", "text_plain_output_596.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_720.png", "text_plain_output_654.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_638.png", "text_plain_output_434.png", "text_plain_output_68.png", "text_plain_output_65.png", "text_plain_output_618.png", "text_plain_output_64.png", "text_plain_output_803.png", "text_plain_output_419.png", "text_plain_output_215.png", "text_plain_output_818.png", "text_plain_output_532.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_637.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_666.png", "text_plain_output_107.png", "text_plain_output_746.png", "text_plain_output_567.png", "text_plain_output_628.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "text_plain_output_695.png", "text_plain_output_318.png", "text_plain_output_808.png", "text_plain_output_417.png", "text_plain_output_707.png", "text_plain_output_690.png", "text_plain_output_52.png", "text_plain_output_545.png", "text_plain_output_393.png", "text_plain_output_758.png", "text_plain_output_572.png", "text_plain_output_594.png", "text_plain_output_66.png", "text_plain_output_446.png", "text_plain_output_243.png", "text_plain_output_611.png", "text_plain_output_45.png", "text_plain_output_380.png", "text_plain_output_599.png", "text_plain_output_692.png", "text_plain_output_442.png", "text_plain_output_665.png", "text_plain_output_300.png", "text_plain_output_660.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "text_plain_output_476.png", "text_plain_output_277.png", "text_plain_output_457.png", "text_plain_output_739.png", "text_plain_output_740.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_837.png", "text_plain_output_518.png", "text_plain_output_561.png", "text_plain_output_431.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_713.png", "text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "text_plain_output_140.png", "text_plain_output_606.png", "text_plain_output_763.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_783.png", "text_plain_output_129.png", "text_plain_output_728.png", "text_plain_output_349.png", "text_plain_output_242.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "text_plain_output_255.png", 
"application_vnd.jupyter.stderr_output_8.png", "text_plain_output_160.png", "text_plain_output_58.png", "text_plain_output_680.png", "text_plain_output_622.png", "text_plain_output_329.png", "text_plain_output_49.png", "text_plain_output_791.png", "text_plain_output_708.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_607.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_681.png", "text_plain_output_333.png", "text_plain_output_108.png", "text_plain_output_581.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_700.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_326.png", "text_plain_output_744.png", "text_plain_output_503.png", "text_plain_output_578.png", "text_plain_output_735.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_658.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_469.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_357.png", "text_plain_output_21.png", "text_plain_output_344.png", "text_plain_output_104.png", "text_plain_output_784.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_623.png", "text_plain_output_466.png", "text_plain_output_568.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "text_plain_output_288.png", "text_plain_output_535.png", "text_plain_output_842.png", "text_plain_output_527.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_839.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_646.png", "text_plain_output_383.png", "text_plain_output_207.png", "text_plain_output_766.png", "text_plain_output_391.png", "text_plain_output_413.png", "text_plain_output_709.png", "text_plain_output_96.png", "text_plain_output_663.png", "text_plain_output_87.png", "text_plain_output_217.png", "text_plain_output_418.png", "text_plain_output_657.png", "text_plain_output_427.png", "text_plain_output_180.png", "text_plain_output_556.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_701.png", "text_plain_output_191.png", "text_plain_output_609.png", "text_plain_output_737.png", "text_plain_output_821.png", "text_plain_output_259.png", "text_plain_output_798.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_447.png", "text_plain_output_290.png", "text_plain_output_801.png", "text_plain_output_506.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "text_plain_output_835.png", "text_plain_output_113.png", "text_plain_output_371.png", "text_plain_output_827.png", "text_plain_output_479.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_328.png", 
"text_plain_output_81.png", "text_plain_output_730.png", "text_plain_output_69.png", "text_plain_output_368.png", "text_plain_output_667.png", "text_plain_output_372.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_767.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_754.png", "text_plain_output_454.png", "text_plain_output_806.png", "text_plain_output_814.png", "text_plain_output_487.png", "text_plain_output_595.png", "text_plain_output_643.png", "text_plain_output_338.png", "text_plain_output_575.png", "text_plain_output_197.png", "text_plain_output_843.png", "text_plain_output_512.png", "text_plain_output_777.png", "text_plain_output_738.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "text_plain_output_38.png", "text_plain_output_517.png", "text_plain_output_823.png", "text_plain_output_682.png", "text_plain_output_433.png", "text_plain_output_528.png", "text_plain_output_648.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "text_plain_output_726.png", "text_plain_output_513.png", "text_plain_output_714.png", "text_plain_output_314.png", "text_plain_output_745.png", "text_plain_output_592.png", "text_plain_output_410.png", "text_plain_output_432.png", "text_plain_output_645.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_787.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_497.png", "text_plain_output_789.png", "text_plain_output_174.png", "text_plain_output_812.png", "text_plain_output_212.png", "text_plain_output_652.png", "text_plain_output_644.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "text_plain_output_742.png", "text_plain_output_630.png", "text_plain_output_778.png", "text_plain_output_435.png", "text_plain_output_689.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_580.png", "text_plain_output_409.png", "text_plain_output_844.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_788.png", "text_plain_output_732.png", "text_plain_output_751.png", "text_plain_output_539.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_498.png", "text_plain_output_211.png", "text_plain_output_662.png", "text_plain_output_780.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_601.png", "text_plain_output_830.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_620.png", "text_plain_output_406.png", "text_plain_output_310.png", "text_plain_output_760.png", "text_plain_output_456.png", "text_plain_output_541.png", "text_plain_output_558.png", "text_plain_output_668.png", "text_plain_output_702.png", "text_plain_output_724.png", "text_plain_output_220.png", "text_plain_output_749.png", "text_plain_output_834.png", "text_plain_output_653.png", "text_plain_output_543.png", "text_plain_output_781.png", "text_plain_output_451.png", "text_plain_output_109.png", "text_plain_output_459.png", "text_plain_output_238.png", "text_plain_output_520.png", "text_plain_output_616.png", "text_plain_output_615.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_612.png", "text_plain_output_832.png", "text_plain_output_253.png", 
"text_plain_output_346.png", "text_plain_output_723.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_800.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", "text_plain_output_345.png", "text_plain_output_649.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_765.png", "text_plain_output_636.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_605.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_797.png", "text_plain_output_508.png", "text_plain_output_573.png", "text_plain_output_468.png", "text_plain_output_370.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_635.png", "text_plain_output_703.png", "text_plain_output_711.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", "text_plain_output_696.png", "text_plain_output_816.png", "text_plain_output_23.png", "text_plain_output_761.png", "text_plain_output_610.png", "text_plain_output_173.png", "text_plain_output_683.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_632.png", "text_plain_output_51.png", "text_plain_output_677.png", "text_plain_output_626.png", "text_plain_output_450.png", "text_plain_output_252.png", "text_plain_output_825.png", "text_plain_output_296.png", "text_plain_output_525.png", "text_plain_output_731.png", "text_plain_output_672.png", "text_plain_output_705.png", "text_plain_output_840.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_820.png", "text_plain_output_537.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_602.png", "text_plain_output_246.png", "text_plain_output_569.png", "text_plain_output_772.png", "text_plain_output_239.png", "text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_710.png", "text_plain_output_500.png", "text_plain_output_719.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "text_plain_output_590.png", "text_plain_output_509.png", "text_plain_output_337.png", "text_plain_output_562.png", "text_plain_output_499.png", "text_plain_output_196.png", "text_plain_output_807.png", "text_plain_output_342.png", "text_plain_output_563.png", "text_plain_output_97.png", "text_plain_output_729.png", "text_plain_output_717.png", "text_plain_output_786.png", "text_plain_output_227.png", "text_plain_output_453.png", "text_plain_output_33.png", "text_plain_output_650.png", "text_plain_output_150.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_631.png", "text_plain_output_39.png", "text_plain_output_752.png", "text_plain_output_176.png", "text_plain_output_584.png", "text_plain_output_335.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_473.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_762.png", "text_plain_output_55.png", "text_plain_output_412.png", "text_plain_output_293.png", 
"text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_841.png", "text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_805.png", "text_plain_output_356.png", "text_plain_output_829.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_698.png", "text_plain_output_336.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "text_plain_output_105.png", "text_plain_output_465.png", "text_plain_output_80.png", "text_plain_output_491.png", "text_plain_output_679.png", "text_plain_output_641.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_534.png", "text_plain_output_444.png", "text_plain_output_619.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", "text_plain_output_694.png", "text_plain_output_402.png", "text_plain_output_755.png", "text_plain_output_722.png", "text_plain_output_424.png", "text_plain_output_486.png", "text_plain_output_597.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_481.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_804.png", "text_plain_output_524.png", "text_plain_output_538.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_553.png", "text_plain_output_838.png", "text_plain_output_408.png", "text_plain_output_425.png", "text_plain_output_591.png", "text_plain_output_811.png", "text_plain_output_428.png", "text_plain_output_416.png", "text_plain_output_625.png", "text_plain_output_194.png", "text_plain_output_577.png", "text_plain_output_727.png", "text_plain_output_747.png", "text_plain_output_782.png", "text_plain_output_519.png", "text_plain_output_62.png", "text_plain_output_733.png", "text_plain_output_721.png", "text_plain_output_480.png", "text_plain_output_757.png", "text_plain_output_303.png", "text_plain_output_810.png", "text_plain_output_621.png", "text_plain_output_377.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "text_plain_output_464.png", "text_plain_output_156.png", "text_plain_output_547.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_587.png", "text_plain_output_448.png", "text_plain_output_364.png", "text_plain_output_365.png", "text_plain_output_815.png", "text_plain_output_61.png", "text_plain_output_792.png", "text_plain_output_585.png", "text_plain_output_352.png", "text_plain_output_83.png", "text_plain_output_374.png", "text_plain_output_647.png", "text_plain_output_472.png", "text_plain_output_566.png", "text_plain_output_397.png", "text_plain_output_600.png", "text_plain_output_661.png", "text_plain_output_389.png", "text_plain_output_292.png", "text_plain_output_351.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_574.png", "text_plain_output_582.png", "text_plain_output_306.png", "text_plain_output_675.png", "text_plain_output_493.png", "text_plain_output_46.png" ]
import pandas as pd
import pandas as pd
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
dup = phone.groupby('device_id').size()
dup = dup[dup > 1]
dup.shape
code
105204911/cell_3
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip uninstall -q -y transformers
code
105204911/cell_10
[ "text_plain_output_1.png" ]
from torch import nn
from transformers import AutoModel
import torch
import torch
from torch import nn
from transformers import AutoModel
@torch.no_grad()
def turn_off_dropout(module: nn.Module) -> None:
    if isinstance(module, nn.Dropout):
        module.p = 0.0
model_path = 'distilbert-base-uncased'
model = AutoModel.from_pretrained(model_path)
model.apply(turn_off_dropout)
model
code
50222260/cell_21
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
classifier.fit(x_train, y_train)
classifier.score(x_test, y_test)
y_predicted = classifier.predict(x_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_predicted)
cm
code
50222260/cell_23
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sm
from sklearn.datasets import load_digits
digits = load_digits()
plt.gray()
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
classifier.fit(x_train, y_train)
classifier.score(x_test, y_test)
y_predicted = classifier.predict(x_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_predicted)
cm
plt.figure(figsize=(10, 7))
sm.heatmap(cm, annot=True)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
code
50222260/cell_18
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
classifier.fit(x_train, y_train)
classifier.score(x_test, y_test)
code
50222260/cell_8
[ "image_output_1.png" ]
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
digits = load_digits()
plt.gray()
for i in range(5):
    plt.matshow(digits.images[i])
code
50222260/cell_16
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier()
classifier.fit(x_train, y_train)
code
50222260/cell_10
[ "image_output_5.png", "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_digits
digits = load_digits()
plt.gray()
x = pd.DataFrame(digits.data)
x
code
50222260/cell_12
[ "text_html_output_1.png" ]
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_digits
digits = load_digits()
plt.gray()
x = pd.DataFrame(digits.data)
x
y = pd.DataFrame(digits.target)
y
code
106202736/cell_3
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import numpy as np
import cv2
from numpy import save
image_arr_new = np.load('../input/imagearray201/image_array_20_1.npy')
image_array = []
for i in image_arr_new:
    image_array.append(cv2.resize(i, (227, 227)))
image_array = np.array(image_array)
image_array.shape
code
121154614/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd #for reading & storing data, pre-processing
import pandas as pd # To use dataframes
train = pd.read_csv('/kaggle/input/walmart-sales-forecast/train.csv')
stores = pd.read_csv('/kaggle/input/walmart-sales-forecast/stores.csv')
features = pd.read_csv('/kaggle/input/walmart-sales-forecast/features.csv')
dataset = train
indexedDataset = dataset.set_index(['Date'], inplace=True)
dataset.drop(['Store', 'Dept', 'IsHoliday'], axis=1, inplace=True)
dataset
code
121154614/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor
from datetime import datetime
import numpy as np #for numerical computations like log,exp,sqrt etc
import pandas as pd #for reading & storing data, pre-processing
import matplotlib.pylab as plt #for visualization
#for making sure matplotlib plots are generated in Jupyter notebook itself
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
import numpy as np # To use np.arrays
import pandas as pd # To use dataframes
from pandas.plotting import autocorrelation_plot as auto_corr # To plot
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
#For date-time
import math
from datetime import datetime
from datetime import timedelta
# Another imports if needs
import itertools
import statsmodels.api as sm
import statsmodels.tsa.api as smt
import statsmodels.formula.api as smf
from sklearn.model_selection import train_test_split
from statsmodels.tsa.seasonal import seasonal_decompose as season
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import accuracy_score, balanced_accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.stattools import adfuller, acf, pacf
from statsmodels.tsa.arima_model import ARIMA
!pip install pmdarima
from pmdarima.utils import decomposed_plot
from pmdarima.arima import decompose
from pmdarima import auto_arima
code
121154614/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd #for reading & storing data, pre-processing
import pandas as pd # To use dataframes
train = pd.read_csv('/kaggle/input/walmart-sales-forecast/train.csv')
stores = pd.read_csv('/kaggle/input/walmart-sales-forecast/stores.csv')
features = pd.read_csv('/kaggle/input/walmart-sales-forecast/features.csv')
train.set_index('Date', inplace=True)
code
121154614/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd #for reading & storing data, pre-processing
import pandas as pd # To use dataframes
train = pd.read_csv('/kaggle/input/walmart-sales-forecast/train.csv')
stores = pd.read_csv('/kaggle/input/walmart-sales-forecast/stores.csv')
features = pd.read_csv('/kaggle/input/walmart-sales-forecast/features.csv')
dataset = train
indexedDataset = dataset.set_index(['Date'], inplace=True)
dataset.drop(['Store', 'Dept', 'IsHoliday'], axis=1, inplace=True)
dataset
code
106208686/cell_13
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense
from keras_tuner import HyperModel
from sklearn.preprocessing import MinMaxScaler
from tcn import TCN
from tensorflow import keras
import keras
import kerastuner as kt
import numpy as np
import pandas
import tensorflow as tf
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense
from keras_tuner import HyperModel
import kerastuner as kt
from tcn import TCN
def tcnModel(trainData, validateData, numOfObservedRecords: int, numOfPredictingRecords: int):
    hyperModel = TCNModel(numOfObservedRecords=numOfObservedRecords, numOfPredictingRecords=numOfPredictingRecords, numOfFeatures=trainData.shape[2])
    bayesianTuner = kt.tuners.BayesianOptimization(hyperModel, objective='mse', max_trials=3, project_name='kerastuner_bayesian_poc', executions_per_trial=5, overwrite=True)
    bayesianTuner.search(trainData, validateData, epochs=100, validation_split=0.2, verbose=0)
    return bayesianTuner.get_best_models(num_models=1)[0]
class TCNModel(HyperModel):
    def __init__(self, numOfObservedRecords, numOfPredictingRecords, numOfFeatures):
        self.numOfObservedRecords = numOfObservedRecords
        self.numOfPredictingRecords = numOfPredictingRecords
        self.numOfFeatures = numOfFeatures
    def build(self, params):
        model = keras.Sequential()
        model.add(TCN(input_shape=(self.numOfObservedRecords, 1), kernel_size=params.Int('units', min_value=2, max_value=8, step=1), use_skip_connections=params.Boolean('use_skip_connections'), use_batch_norm=False, use_weight_norm=False, use_layer_norm=True, dropout_rate=params.Float('drop_out', 0, 0.5, 0.1), nb_filters=params.Int('units', min_value=32, max_value=512, step=32)))
        model.add(Dense(self.numOfPredictingRecords, activation=params.Choice('dense_activation', values=['relu', 'tanh', 'sigmoid'], default='relu')))
        model.compile(loss='mse', metrics=['mse'], optimizer=tf.keras.optimizers.Adam(params.Choice('learning_rate', values=[0.01, 0.001, 0.0001])))
        return model
from sklearn.preprocessing import MinMaxScaler
def scale3DArray(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))) -> np.ndarray:
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return scaledArr
def scale3DArrayReturningScaler(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))):
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return (scaledArr, scaler)
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
data = list(data['value'])[:10200]
data_train = data[:int(len(data) * 0.8)]
data_test = data[int(len(data) * 0.8):]
O_data_test = data_test
minmaxSc = MinMaxScaler()
data_train = np.array(data_train)
data_train = np.reshape(data_train, (int(data_train.shape[0] / 120), 120))
data_train = minmaxSc.fit_transform(data_train)
train, validate = np.array_split(data_train, [72], 1)
train = np.reshape(train, (train.shape[0], train.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
model = tcnModel(train, validate, 72, 48)
code
106208686/cell_4
[ "text_plain_output_1.png" ]
pip install keras-tcn
code
106208686/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
data = list(data['value'])[:10200]
data_train = data[:int(len(data) * 0.8)]
data_test = data[int(len(data) * 0.8):]
print('len train ', len(data_train))
print('len test ', len(data_test))
O_data_test = data_test
code
106208686/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import keras
from keras.layers import Dense
import os
import pandas
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106208686/cell_7
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense
from keras_tuner import HyperModel
from tcn import TCN
from tensorflow import keras
import keras
import kerastuner as kt
import tensorflow as tf
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense
from keras_tuner import HyperModel
import kerastuner as kt
from tcn import TCN
def tcnModel(trainData, validateData, numOfObservedRecords: int, numOfPredictingRecords: int):
    hyperModel = TCNModel(numOfObservedRecords=numOfObservedRecords, numOfPredictingRecords=numOfPredictingRecords, numOfFeatures=trainData.shape[2])
    bayesianTuner = kt.tuners.BayesianOptimization(hyperModel, objective='mse', max_trials=3, project_name='kerastuner_bayesian_poc', executions_per_trial=5, overwrite=True)
    bayesianTuner.search(trainData, validateData, epochs=100, validation_split=0.2, verbose=0)
    return bayesianTuner.get_best_models(num_models=1)[0]
class TCNModel(HyperModel):
    def __init__(self, numOfObservedRecords, numOfPredictingRecords, numOfFeatures):
        self.numOfObservedRecords = numOfObservedRecords
        self.numOfPredictingRecords = numOfPredictingRecords
        self.numOfFeatures = numOfFeatures
    def build(self, params):
        model = keras.Sequential()
        model.add(TCN(input_shape=(self.numOfObservedRecords, 1), kernel_size=params.Int('units', min_value=2, max_value=8, step=1), use_skip_connections=params.Boolean('use_skip_connections'), use_batch_norm=False, use_weight_norm=False, use_layer_norm=True, dropout_rate=params.Float('drop_out', 0, 0.5, 0.1), nb_filters=params.Int('units', min_value=32, max_value=512, step=32)))
        model.add(Dense(self.numOfPredictingRecords, activation=params.Choice('dense_activation', values=['relu', 'tanh', 'sigmoid'], default='relu')))
        model.compile(loss='mse', metrics=['mse'], optimizer=tf.keras.optimizers.Adam(params.Choice('learning_rate', values=[0.01, 0.001, 0.0001])))
        return model
code
106208686/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense
from keras_tuner import HyperModel
from sklearn.preprocessing import MinMaxScaler
from tcn import TCN
from tensorflow import keras
import keras
import kerastuner as kt
import numpy as np
import pandas
import tensorflow as tf
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense
from keras_tuner import HyperModel
import kerastuner as kt
from tcn import TCN
def tcnModel(trainData, validateData, numOfObservedRecords: int, numOfPredictingRecords: int):
    hyperModel = TCNModel(numOfObservedRecords=numOfObservedRecords, numOfPredictingRecords=numOfPredictingRecords, numOfFeatures=trainData.shape[2])
    bayesianTuner = kt.tuners.BayesianOptimization(hyperModel, objective='mse', max_trials=3, project_name='kerastuner_bayesian_poc', executions_per_trial=5, overwrite=True)
    bayesianTuner.search(trainData, validateData, epochs=100, validation_split=0.2, verbose=0)
    return bayesianTuner.get_best_models(num_models=1)[0]
class TCNModel(HyperModel):
    def __init__(self, numOfObservedRecords, numOfPredictingRecords, numOfFeatures):
        self.numOfObservedRecords = numOfObservedRecords
        self.numOfPredictingRecords = numOfPredictingRecords
        self.numOfFeatures = numOfFeatures
    def build(self, params):
        model = keras.Sequential()
        model.add(TCN(input_shape=(self.numOfObservedRecords, 1), kernel_size=params.Int('units', min_value=2, max_value=8, step=1), use_skip_connections=params.Boolean('use_skip_connections'), use_batch_norm=False, use_weight_norm=False, use_layer_norm=True, dropout_rate=params.Float('drop_out', 0, 0.5, 0.1), nb_filters=params.Int('units', min_value=32, max_value=512, step=32)))
        model.add(Dense(self.numOfPredictingRecords, activation=params.Choice('dense_activation', values=['relu', 'tanh', 'sigmoid'], default='relu')))
        model.compile(loss='mse', metrics=['mse'], optimizer=tf.keras.optimizers.Adam(params.Choice('learning_rate', values=[0.01, 0.001, 0.0001])))
        return model
from sklearn.preprocessing import MinMaxScaler
def scale3DArray(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))) -> np.ndarray:
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return scaledArr
def scale3DArrayReturningScaler(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))):
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return (scaledArr, scaler)
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
data = list(data['value'])[:10200]
data_train = data[:int(len(data) * 0.8)]
data_test = data[int(len(data) * 0.8):]
O_data_test = data_test
minmaxSc = MinMaxScaler()
data_train = np.array(data_train)
data_train = np.reshape(data_train, (int(data_train.shape[0] / 120), 120))
data_train = minmaxSc.fit_transform(data_train)
train, validate = np.array_split(data_train, [72], 1)
train = np.reshape(train, (train.shape[0], train.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
model = tcnModel(train, validate, 72, 48)
data_test = np.array(data_test)
data_test = np.reshape(data_test, (int(data_test.shape[0] / 120), 120))
data_test = minmaxSc.transform(data_test)
test, validate = np.array_split(data_test, [72], 1)
test = np.reshape(test, (test.shape[0], test.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
test_predict = np.array(model.predict(test))
print(test_predict.shape)
test_predict = np.reshape(test_predict, (test_predict.shape[0], test_predict.shape[1], 1))
print(test_predict.shape)
test_predict_full = np.concatenate((test, test_predict), axis=1)
print(test_predict_full.shape)
test_predict_full = np.squeeze(test_predict_full)
print(test_predict_full.shape)
test_predict_full = minmaxSc.inverse_transform(test_predict_full)
test_predict_full = list(test_predict_full.flatten())
print(len(test_predict_full))
code
106208686/cell_16
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dense
from keras_tuner import HyperModel
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import MinMaxScaler
from tcn import TCN
from tensorflow import keras
import keras
import kerastuner as kt
import numpy as np
import pandas
import tensorflow as tf
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense
from keras_tuner import HyperModel
import kerastuner as kt
from tcn import TCN
def tcnModel(trainData, validateData, numOfObservedRecords: int, numOfPredictingRecords: int):
    hyperModel = TCNModel(numOfObservedRecords=numOfObservedRecords, numOfPredictingRecords=numOfPredictingRecords, numOfFeatures=trainData.shape[2])
    bayesianTuner = kt.tuners.BayesianOptimization(hyperModel, objective='mse', max_trials=3, project_name='kerastuner_bayesian_poc', executions_per_trial=5, overwrite=True)
    bayesianTuner.search(trainData, validateData, epochs=100, validation_split=0.2, verbose=0)
    return bayesianTuner.get_best_models(num_models=1)[0]
class TCNModel(HyperModel):
    def __init__(self, numOfObservedRecords, numOfPredictingRecords, numOfFeatures):
        self.numOfObservedRecords = numOfObservedRecords
        self.numOfPredictingRecords = numOfPredictingRecords
        self.numOfFeatures = numOfFeatures
    def build(self, params):
        model = keras.Sequential()
        model.add(TCN(input_shape=(self.numOfObservedRecords, 1), kernel_size=params.Int('units', min_value=2, max_value=8, step=1), use_skip_connections=params.Boolean('use_skip_connections'), use_batch_norm=False, use_weight_norm=False, use_layer_norm=True, dropout_rate=params.Float('drop_out', 0, 0.5, 0.1), nb_filters=params.Int('units', min_value=32, max_value=512, step=32)))
        model.add(Dense(self.numOfPredictingRecords, activation=params.Choice('dense_activation', values=['relu', 'tanh', 'sigmoid'], default='relu')))
        model.compile(loss='mse', metrics=['mse'], optimizer=tf.keras.optimizers.Adam(params.Choice('learning_rate', values=[0.01, 0.001, 0.0001])))
        return model
from sklearn.preprocessing import MinMaxScaler
def scale3DArray(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))) -> np.ndarray:
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return scaledArr
def scale3DArrayReturningScaler(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))):
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return (scaledArr, scaler)
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
data = list(data['value'])[:10200]
data_train = data[:int(len(data) * 0.8)]
data_test = data[int(len(data) * 0.8):]
O_data_test = data_test
minmaxSc = MinMaxScaler()
data_train = np.array(data_train)
data_train = np.reshape(data_train, (int(data_train.shape[0] / 120), 120))
data_train = minmaxSc.fit_transform(data_train)
train, validate = np.array_split(data_train, [72], 1)
train = np.reshape(train, (train.shape[0], train.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
model = tcnModel(train, validate, 72, 48)
data_test = np.array(data_test)
data_test = np.reshape(data_test, (int(data_test.shape[0] / 120), 120))
data_test = minmaxSc.transform(data_test)
test, validate = np.array_split(data_test, [72], 1)
test = np.reshape(test, (test.shape[0], test.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
test_predict = np.array(model.predict(test))
test_predict = np.reshape(test_predict, (test_predict.shape[0], test_predict.shape[1], 1))
test_predict_full = np.concatenate((test, test_predict), axis=1)
test_predict_full = np.squeeze(test_predict_full)
test_predict_full = minmaxSc.inverse_transform(test_predict_full)
test_predict_full = list(test_predict_full.flatten())
from sklearn.metrics import mean_squared_error, r2_score
r2 = round(r2_score(O_data_test, test_predict_full), 3)
rmse = round(np.sqrt(mean_squared_error(O_data_test, test_predict_full)), 3)
print(r2)
print(rmse)
code
106208686/cell_14
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas
from sklearn.preprocessing import MinMaxScaler
def scale3DArray(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))) -> np.ndarray:
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return scaledArr
def scale3DArrayReturningScaler(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))):
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return (scaledArr, scaler)
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
data = list(data['value'])[:10200]
data_train = data[:int(len(data) * 0.8)]
data_test = data[int(len(data) * 0.8):]
O_data_test = data_test
minmaxSc = MinMaxScaler()
data_train = np.array(data_train)
data_train = np.reshape(data_train, (int(data_train.shape[0] / 120), 120))
data_train = minmaxSc.fit_transform(data_train)
train, validate = np.array_split(data_train, [72], 1)
train = np.reshape(train, (train.shape[0], train.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
data_test = np.array(data_test)
print(data_test.shape)
data_test = np.reshape(data_test, (int(data_test.shape[0] / 120), 120))
print(data_test.shape)
data_test = minmaxSc.transform(data_test)
print(data_test.shape)
test, validate = np.array_split(data_test, [72], 1)
print(test.shape)
print(validate.shape)
test = np.reshape(test, (test.shape[0], test.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
print(test.shape)
print(validate.shape)
code
106208686/cell_10
[ "text_plain_output_1.png" ]
import pandas
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
print(data.head())
print(len(data))
data = list(data['value'])[:10200]
print(len(data))
code
106208686/cell_12
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas
from sklearn.preprocessing import MinMaxScaler
def scale3DArray(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))) -> np.ndarray:
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return scaledArr
def scale3DArrayReturningScaler(arr: np.ndarray, scaler: any=MinMaxScaler(feature_range=(-1, 1))):
    scaledArr = np.reshape(arr, (arr.shape[0], arr.shape[1] * arr.shape[2]))
    scaledArr = scaler.fit_transform(scaledArr)
    scaledArr = np.reshape(scaledArr, tuple(arr.shape))
    return (scaledArr, scaler)
data = pandas.read_csv('../input/nyc-taxi-traffic/dataset.csv')
data = list(data['value'])[:10200]
data_train = data[:int(len(data) * 0.8)]
data_test = data[int(len(data) * 0.8):]
O_data_test = data_test
minmaxSc = MinMaxScaler()
data_train = np.array(data_train)
print(data_train.shape)
data_train = np.reshape(data_train, (int(data_train.shape[0] / 120), 120))
print(data_train.shape)
data_train = minmaxSc.fit_transform(data_train)
print(data_train.shape)
train, validate = np.array_split(data_train, [72], 1)
print(train.shape)
print(validate.shape)
train = np.reshape(train, (train.shape[0], train.shape[1], 1))
validate = np.reshape(validate, (validate.shape[0], validate.shape[1], 1))
print(train.shape)
print(validate.shape)
code
16167156/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_lokasi = pd.read_csv('../input/catatan_lokasi.csv')
profil_karyawan = pd.read_csv('../input/data_profil.csv')
data_lokasi.info()
code
16167156/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_lokasi = pd.read_csv('../input/catatan_lokasi.csv')
profil_karyawan = pd.read_csv('../input/data_profil.csv')
profil_karyawan.info()
code
16167156/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_lokasi = pd.read_csv('../input/catatan_lokasi.csv')
profil_karyawan = pd.read_csv('../input/data_profil.csv')
print(data_lokasi.head())
print('ukuran data: ' + str(data_lokasi.shape))
code
16167156/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_lokasi = pd.read_csv('../input/catatan_lokasi.csv')
profil_karyawan = pd.read_csv('../input/data_profil.csv')
print(profil_karyawan.head())
print('ukuran data: ' + str(profil_karyawan.shape))
code
16130176/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
atomic_radius = {'H': 0.38, 'C': 0.77, 'N': 0.75, 'O': 0.73, 'F': 0.71, np.nan: 0}
atomic_number = {'H': 1, 'C': 6, 'N': 7, 'O': 8, 'F': 9, np.nan: 0}
atomic_mass = {'H': 1.0079, 'C': 12.0107, 'N': 14.0067, 'O': 15.9994, 'F': 18.9984, np.nan: 0}
vanderwaalsradius = {'H': 120, 'C': 185, 'N': 154, 'O': 140, 'F': 135, np.nan: 0}
covalenzradius = {'H': 30, 'C': 77, 'N': 70, 'O': 66, 'F': 58, np.nan: 0}
electronegativity = {'H': 2.2, 'C': 2.55, 'N': 3.04, 'O': 3.44, 'F': 3.98, np.nan: 0}
ionization_energy = {'H': 13.5984, 'C': 11.2603, 'N': 14.5341, 'O': 13.6181, 'F': 17.4228, np.nan: np.inf}
def atom_props(df, suffix):
    df['atomic_radius' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_radius[x])
    df['atomic_protons' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_number[x])
    df['atomic_mass' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_mass[x])
    df['vanderwaalsradius' + suffix] = df['atom_' + suffix].apply(lambda x: vanderwaalsradius[x])
    df['covalenzradius' + suffix] = df['atom_' + suffix].apply(lambda x: covalenzradius[x])
    df['electronegativity' + suffix] = df['atom_' + suffix].apply(lambda x: electronegativity[x])
    df['ionization_energy' + suffix] = df['atom_' + suffix].apply(lambda x: ionization_energy[x])
    return df
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
atom_cnt = structures['molecule_name'].value_counts().reset_index(level=0)
atom_cnt.rename({'index': 'molecule_name', 'molecule_name': 'atom_count'}, axis=1, inplace=True)
train = pd.merge(train, atom_cnt, how='left', on='molecule_name')
test = pd.merge(test, atom_cnt, how='left', on='molecule_name')
del atom_cnt
def lr(df):
    df['atom_index_0l'] = df['atom_index_0'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_0', 'atom_count']]
    df['atom_index_0r'] = tmp.apply(lambda row: min(row['atom_index_0'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0r'}, axis=1)
    df['atom_index_1l'] = df['atom_index_1'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_1', 'atom_count']]
    df['atom_index_1r'] = tmp.apply(lambda row: min(row['atom_index_1'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1r'}, axis=1)
    return df
train = lr(train)
test = lr(test)
train = atom_props(train, '0')
train = atom_props(train, '0l')
train = atom_props(train, '0r')
train = atom_props(train, '1')
train = atom_props(train, '1l')
train = atom_props(train, '1r')
test = atom_props(test, '0')
test = atom_props(test, '0l')
test = atom_props(test, '0r')
test = atom_props(test, '1')
test = atom_props(test, '1l')
test = atom_props(test, '1r')
train.drop(['atom_index_x', 'atom_index_y'], axis=1, inplace=True)
test.drop(['atom_index_x', 'atom_index_y'], axis=1, inplace=True)
code
16130176/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
atom_cnt = structures['molecule_name'].value_counts().reset_index(level=0)
atom_cnt.rename({'index': 'molecule_name', 'molecule_name': 'atom_count'}, axis=1, inplace=True)
train = pd.merge(train, atom_cnt, how='left', on='molecule_name')
test = pd.merge(test, atom_cnt, how='left', on='molecule_name')
del atom_cnt
code
16130176/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16130176/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
atom_cnt = structures['molecule_name'].value_counts().reset_index(level=0)
atom_cnt.rename({'index': 'molecule_name', 'molecule_name': 'atom_count'}, axis=1, inplace=True)
train = pd.merge(train, atom_cnt, how='left', on='molecule_name')
test = pd.merge(test, atom_cnt, how='left', on='molecule_name')
del atom_cnt
def lr(df):
    df['atom_index_0l'] = df['atom_index_0'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_0', 'atom_count']]
    df['atom_index_0r'] = tmp.apply(lambda row: min(row['atom_index_0'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0r'}, axis=1)
    df['atom_index_1l'] = df['atom_index_1'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_1', 'atom_count']]
    df['atom_index_1r'] = tmp.apply(lambda row: min(row['atom_index_1'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1r'}, axis=1)
    return df
train = lr(train)
test = lr(test)
code
16130176/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
code
16130176/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
atomic_radius = {'H': 0.38, 'C': 0.77, 'N': 0.75, 'O': 0.73, 'F': 0.71, np.nan: 0}
atomic_number = {'H': 1, 'C': 6, 'N': 7, 'O': 8, 'F': 9, np.nan: 0}
atomic_mass = {'H': 1.0079, 'C': 12.0107, 'N': 14.0067, 'O': 15.9994, 'F': 18.9984, np.nan: 0}
vanderwaalsradius = {'H': 120, 'C': 185, 'N': 154, 'O': 140, 'F': 135, np.nan: 0}
covalenzradius = {'H': 30, 'C': 77, 'N': 70, 'O': 66, 'F': 58, np.nan: 0}
electronegativity = {'H': 2.2, 'C': 2.55, 'N': 3.04, 'O': 3.44, 'F': 3.98, np.nan: 0}
ionization_energy = {'H': 13.5984, 'C': 11.2603, 'N': 14.5341, 'O': 13.6181, 'F': 17.4228, np.nan: np.inf}
def atom_props(df, suffix):
    df['atomic_radius' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_radius[x])
    df['atomic_protons' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_number[x])
    df['atomic_mass' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_mass[x])
    df['vanderwaalsradius' + suffix] = df['atom_' + suffix].apply(lambda x: vanderwaalsradius[x])
    df['covalenzradius' + suffix] = df['atom_' + suffix].apply(lambda x: covalenzradius[x])
    df['electronegativity' + suffix] = df['atom_' + suffix].apply(lambda x: electronegativity[x])
    df['ionization_energy' + suffix] = df['atom_' + suffix].apply(lambda x: ionization_energy[x])
    return df
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
atom_cnt = structures['molecule_name'].value_counts().reset_index(level=0)
atom_cnt.rename({'index': 'molecule_name', 'molecule_name': 'atom_count'}, axis=1, inplace=True)
train = pd.merge(train, atom_cnt, how='left', on='molecule_name')
test = pd.merge(test, atom_cnt, how='left', on='molecule_name')
del atom_cnt
def lr(df):
    df['atom_index_0l'] = df['atom_index_0'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_0', 'atom_count']]
    df['atom_index_0r'] = tmp.apply(lambda row: min(row['atom_index_0'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0r'}, axis=1)
    df['atom_index_1l'] = df['atom_index_1'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_1', 'atom_count']]
    df['atom_index_1r'] = tmp.apply(lambda row: min(row['atom_index_1'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1r'}, axis=1)
    return df
train = lr(train)
test = lr(test)
train = atom_props(train, '0')
train = atom_props(train, '0l')
train = atom_props(train, '0r')
train = atom_props(train, '1')
train = atom_props(train, '1l')
train = atom_props(train, '1r')
test = atom_props(test, '0')
test = atom_props(test, '0l')
test = atom_props(test, '0r')
test = atom_props(test, '1')
test = atom_props(test, '1l')
test = atom_props(test, '1r')
train.drop(['atom_index_x', 'atom_index_y'], axis=1, inplace=True)
test.drop(['atom_index_x', 'atom_index_y'], axis=1, inplace=True)
# https://www.kaggle.com/c/champs-scalar-coupling/discussion/96655#latest-558745
def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                c_prec = df[col].apply(lambda x: np.finfo(x).precision).max()
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max and c_prec == np.finfo(np.float16).precision:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max and c_prec == np.finfo(np.float32).precision:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df
train = reduce_mem_usage(train)
test = reduce_mem_usage(test)
code
16130176/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
atomic_radius = {'H': 0.38, 'C': 0.77, 'N': 0.75, 'O': 0.73, 'F': 0.71, np.nan: 0}
atomic_number = {'H': 1, 'C': 6, 'N': 7, 'O': 8, 'F': 9, np.nan: 0}
atomic_mass = {'H': 1.0079, 'C': 12.0107, 'N': 14.0067, 'O': 15.9994, 'F': 18.9984, np.nan: 0}
vanderwaalsradius = {'H': 120, 'C': 185, 'N': 154, 'O': 140, 'F': 135, np.nan: 0}
covalenzradius = {'H': 30, 'C': 77, 'N': 70, 'O': 66, 'F': 58, np.nan: 0}
electronegativity = {'H': 2.2, 'C': 2.55, 'N': 3.04, 'O': 3.44, 'F': 3.98, np.nan: 0}
ionization_energy = {'H': 13.5984, 'C': 11.2603, 'N': 14.5341, 'O': 13.6181, 'F': 17.4228, np.nan: np.inf}
def atom_props(df, suffix):
    df['atomic_radius' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_radius[x])
    df['atomic_protons' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_number[x])
    df['atomic_mass' + suffix] = df['atom_' + suffix].apply(lambda x: atomic_mass[x])
    df['vanderwaalsradius' + suffix] = df['atom_' + suffix].apply(lambda x: vanderwaalsradius[x])
    df['covalenzradius' + suffix] = df['atom_' + suffix].apply(lambda x: covalenzradius[x])
    df['electronegativity' + suffix] = df['atom_' + suffix].apply(lambda x: electronegativity[x])
    df['ionization_energy' + suffix] = df['atom_' + suffix].apply(lambda x: ionization_energy[x])
    return df
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
train = pd.merge(train, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0'}, axis=1)
test = pd.merge(test, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1'}, axis=1)
atom_cnt = structures['molecule_name'].value_counts().reset_index(level=0)
atom_cnt.rename({'index': 'molecule_name', 'molecule_name': 'atom_count'}, axis=1, inplace=True)
train = pd.merge(train, atom_cnt, how='left', on='molecule_name')
test = pd.merge(test, atom_cnt, how='left', on='molecule_name')
del atom_cnt
def lr(df):
    df['atom_index_0l'] = df['atom_index_0'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_0', 'atom_count']]
    df['atom_index_0r'] = tmp.apply(lambda row: min(row['atom_index_0'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_0r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_0r'}, axis=1)
    df['atom_index_1l'] = df['atom_index_1'].apply(lambda i: max(i - 1, 0))
    tmp = df[['atom_index_1', 'atom_count']]
    df['atom_index_1r'] = tmp.apply(lambda row: min(row['atom_index_1'] + 1, row['atom_count']), axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1l'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1l'}, axis=1)
    df = pd.merge(df, structures[['molecule_name', 'atom_index', 'atom']], how='left', left_on=['molecule_name', 'atom_index_1r'], right_on=['molecule_name', 'atom_index']).rename({'atom': 'atom_1r'}, axis=1)
    return df
train = lr(train)
test = lr(test)
train = atom_props(train, '0')
train = atom_props(train, '0l')
train = atom_props(train, '0r')
train = atom_props(train, '1')
train = atom_props(train, '1l')
train = atom_props(train, '1r')
test = atom_props(test, '0')
test = atom_props(test, '0l')
test = atom_props(test, '0r')
test = atom_props(test, '1')
test = atom_props(test, '1l')
test = atom_props(test, '1r')
code
16130176/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
structures = pd.read_csv('input/structures.csv')
code
16151986/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['wheezy-copper-turtle-magic'] = train['wheezy-copper-turtle-magic'].astype('category')
test['wheezy-copper-turtle-magic'] = test['wheezy-copper-turtle-magic'].astype('category')
magicNum = 131073
default_cols = [c for c in train.columns if c not in ['id', 'target', 'target_pred', 'wheezy-copper-turtle-magic']]
cols = [c for c in default_cols]
sub = pd.read_csv('../input/sample_submission.csv')
sub.to_csv('submission.csv', index=False)
(train.shape, test.shape)
if sub.shape[0] == magicNum:
    [].shape
preds = np.zeros(len(test))
train_err = np.zeros(512)
test_err = np.zeros(512)
for i in range(512):
    X = train[train['wheezy-copper-turtle-magic'] == i].copy()
    Y = X.pop('target').values
    X_test = test[test['wheezy-copper-turtle-magic'] == i].copy()
    idx_train = X.index
    idx_test = X_test.index
    X.reset_index(drop=True, inplace=True)
    X = X[cols].values
    X_test = X_test[cols].values
    vt = VarianceThreshold(threshold=2).fit(X)
    X = vt.transform(X)
    X_test = vt.transform(X_test)
    X_all = np.concatenate([X, X_test])
    train_size = len(X)
    test1_size = test[:131073][test[:131073]['wheezy-copper-turtle-magic'] == i].shape[0]
    compo_cnt = 6
    for ii in range(30):
        gmm = GaussianMixture(n_components=compo_cnt, init_params='random', covariance_type='full', max_iter=100, tol=1e-10, reg_covar=0.0001).fit(X_all)
        labels = gmm.predict(X_all)
        cntStd = np.std([len(labels[labels == j]) for j in range(compo_cnt)])
        if round(cntStd, 4) == 0.4714:
            check_labels = labels[:train_size]
            cvt_labels = np.zeros(len(labels))
            for iii in range(compo_cnt):
                mean_val = Y[check_labels == iii].mean()
                mean_val = 1 if mean_val > 0.5 else 0
                cvt_labels[labels == iii] = mean_val
            train_err[i] = len(Y[Y != cvt_labels[:train_size]])
            if train_err[i] >= 10 and train_err[i] <= 15:
                train_err[i] = 12.5
            exp_err = max(0, (25 - train_err[i]) / (train_size + test1_size))
            for iii in range(compo_cnt):
                mean_val = Y[check_labels == iii].mean()
                mean_val = 1 - exp_err if mean_val > 0.5 else exp_err
                cvt_labels[labels == iii] = mean_val
            check_acc = roc_auc_score(Y, cvt_labels[:train_size])
            preds[idx_test] = cvt_labels[train_size:]
            break
sub['target'] = preds
sub.to_csv('submission.csv', index=False)
code
16151986/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['wheezy-copper-turtle-magic'] = train['wheezy-copper-turtle-magic'].astype('category')
test['wheezy-copper-turtle-magic'] = test['wheezy-copper-turtle-magic'].astype('category')
magicNum = 131073
default_cols = [c for c in train.columns if c not in ['id', 'target', 'target_pred', 'wheezy-copper-turtle-magic']]
cols = [c for c in default_cols]
sub = pd.read_csv('../input/sample_submission.csv')
sub.to_csv('submission.csv', index=False)
(train.shape, test.shape)
code
73074157/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/batchgradientdescent/ex1data1.csv', header=None)
df
df.columns = ['Population of City in 10,000s', 'Profit in £10,000s']
plt.xlabel('Population in city in 10,000s')
plt.ylabel('Profit in £10,000s')
plt.title('Relationship between city size and profit size')
plt.plot(df.iloc[:, 0], df.iloc[:, 1], 'ro', mec='k')
plt.legend(['Dataset'])
code
73074157/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/batchgradientdescent/ex1data1.csv', header=None)
df
df.columns = ['Population of City in 10,000s', 'Profit in £10,000s']
df.info()
code
73074157/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/batchgradientdescent/ex1data1.csv', header=None)
df
code
73074157/cell_35
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/batchgradientdescent/ex1data1.csv', header=None)
df
df.columns = ['Population of City in 10,000s', 'Profit in £10,000s']
X = df.iloc[:, :-1]
y = df.iloc[:, 1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 / 4, random_state=42)
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
m, n = X_train.values.shape
o, p = X_test.values.shape
X_train = pd.concat((pd.DataFrame(np.ones((m, 1)), columns=['Bias']), X_train), axis=1)
X_test = pd.concat((pd.DataFrame(np.ones((o, 1)), columns=['Bias']), X_test), axis=1)
pop_prof_model = LinearRegression()
pop_prof_model.fit(X_train, y_train)
y_pred_train = pop_prof_model.predict(X_train)
MSE_train = pop_prof_model.LR_Cost(X_train, y_train)
print('Theta estimates are: {}'.format(pop_prof_model.theta))
print('Training dataset mean squared error: {}'.format(MSE_train))
code
73074157/cell_37
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/batchgradientdescent/ex1data1.csv', header=None)
df
df.columns = ['Population of City in 10,000s', 'Profit in £10,000s']
X = df.iloc[:, :-1]
y = df.iloc[:, 1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 / 4, random_state=42)
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
m, n = X_train.values.shape
o, p = X_test.values.shape
X_train = pd.concat((pd.DataFrame(np.ones((m, 1)), columns=['Bias']), X_train), axis=1)
X_test = pd.concat((pd.DataFrame(np.ones((o, 1)), columns=['Bias']), X_test), axis=1)
pop_prof_model = LinearRegression()
pop_prof_model.fit(X_train, y_train)
y_pred_train = pop_prof_model.predict(X_train)
MSE_train = pop_prof_model.LR_Cost(X_train, y_train)
pop_prof_model.fit(X_test, y_test)
y_pred_test = pop_prof_model.predict(X_test)
MSE_test = pop_prof_model.LR_Cost(X_test, y_test)
plt.xlabel('Population in city in 10,000s')
plt.ylabel('Profit in £10,000s')
plt.title('Relationship between city size and profit size')
plt.plot(X_test.iloc[:, 1], y_test, 'ro', mec='k')
plt.plot(X_test.iloc[:, 1], y_pred_test, '-b', mec='k')
plt.legend(['Dataset', 'Linear Regression'])
code
73074157/cell_36
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/batchgradientdescent/ex1data1.csv', header=None)
df
df.columns = ['Population of City in 10,000s', 'Profit in £10,000s']
X = df.iloc[:, :-1]
y = df.iloc[:, 1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 / 4, random_state=42)
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
m, n = X_train.values.shape
o, p = X_test.values.shape
X_train = pd.concat((pd.DataFrame(np.ones((m, 1)), columns=['Bias']), X_train), axis=1)
X_test = pd.concat((pd.DataFrame(np.ones((o, 1)), columns=['Bias']), X_test), axis=1)
pop_prof_model = LinearRegression()
pop_prof_model.fit(X_train, y_train)
y_pred_train = pop_prof_model.predict(X_train)
MSE_train = pop_prof_model.LR_Cost(X_train, y_train)
pop_prof_model.fit(X_test, y_test)
y_pred_test = pop_prof_model.predict(X_test)
MSE_test = pop_prof_model.LR_Cost(X_test, y_test)
print('Theta estimates are: {}'.format(pop_prof_model.theta))
print('Test dataset mean squared error: {}'.format(MSE_test))
code
17120135/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
df_BTM_REST1 = df_BTM_REST[:10]
labels = df_BTM_REST1.index
explode = (0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
df_RATE_BTM = df_BTM[['rate', 'rest_type', 'online_order', 'votes', 'book_table', 'approx_cost(for two people)', 'listed_in(type)', 'listed_in(city)']].dropna()
df_RATE_BTM['rate'] = df_RATE_BTM['rate'].apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else 0)
df_RATE_BTM['approx_cost(for two people)'] = df_RATE_BTM['approx_cost(for two people)'].apply(lambda x: int(x.replace(',', '')))
df_rating = df_BTM['rate'].dropna().apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else np.nan).dropna()
f, axes = plt.subplots(1, 2, figsize=(20, 10), sharex=True)
sns.despine(left=True)
sns.distplot(df_rating, bins=20, ax=axes[0]).set_title('Rating distribution in BTM Region')
plt.xlabel('Rating')
df_grp = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='votes', ascending=False)
sns.distplot(df_grp['rate'], bins=20, ax=axes[1]).set_title('Average Rating distribution in BTM Region')
code
17120135/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
plt.figure(figsize=(20, 10))
sns.barplot(x=df_loc, y=df_loc.index)
plt.title('Top 20 locations with highest number of Restaurants.')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
code