path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
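Each record pairs one notebook cell (the path column combines what looks like a notebook id and a cell id, e.g. 90127845/cell_8) with its flattened source code and the file names of its rendered-output screenshots; cell_type takes the single value "code". A minimal sketch of reading such records with pandas follows; the file name cells.jsonl and the JSON-lines layout are illustrative assumptions, not part of this dump.

import pandas as pd

# Load the records (assumed here to be exported as JSON lines with the four columns above).
df = pd.read_json('cells.jsonl', lines=True)

# 'path' combines a notebook id and a cell id; split it to group cells per notebook.
df[['kernel_id', 'cell_id']] = df['path'].str.split('/', expand=True)
print(df.groupby('kernel_id')['cell_id'].count())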
90127845/cell_8
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import datetime import os import pandas as pd root = '/kaggle/input/tabular-playground-series-mar-2022' train_df = pd.read_csv(os.path.join(root, 'train.csv')) train_df['datetime'] = pd.to_datetime(train_df.time) train_df['date'] = train_df.datetime.dt.date train_df['time'] = train_df.datetime.dt.time test_df = pd.read_csv(os.path.join(root, 'test.csv')) test_df['datetime'] = pd.to_datetime(test_df.time) test_df['date'] = test_df.datetime.dt.date test_df['time'] = test_df.datetime.dt.time sep_30 = datetime.date(1991, 9, 30) mondays = train_df[train_df.datetime.dt.dayofweek == 0] mondays['is_morning'] = mondays.datetime.dt.hour < 12 labor_day = datetime.date(1991, 9, 2) memorial_day = datetime.date(1991, 5, 27) mondays = mondays[(mondays.date != labor_day) & (mondays.date != memorial_day)] mondays[mondays.datetime.dt.date < sep_30].groupby('date').congestion.mean().plot() plt.title('Congestion by date') plt.ylabel('avg daily congestion') plt.tight_layout() plt.show()
code
90127845/cell_17
[ "image_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor import datetime import os import pandas as pd root = '/kaggle/input/tabular-playground-series-mar-2022' train_df = pd.read_csv(os.path.join(root, 'train.csv')) train_df['datetime'] = pd.to_datetime(train_df.time) train_df['date'] = train_df.datetime.dt.date train_df['time'] = train_df.datetime.dt.time test_df = pd.read_csv(os.path.join(root, 'test.csv')) test_df['datetime'] = pd.to_datetime(test_df.time) test_df['date'] = test_df.datetime.dt.date test_df['time'] = test_df.datetime.dt.time sep_30 = datetime.date(1991, 9, 30) mondays = train_df[train_df.datetime.dt.dayofweek == 0] mondays['is_morning'] = mondays.datetime.dt.hour < 12 labor_day = datetime.date(1991, 9, 2) memorial_day = datetime.date(1991, 5, 27) mondays = mondays[(mondays.date != labor_day) & (mondays.date != memorial_day)] plt.tight_layout() plt.tight_layout() for (x, y), G in mondays.groupby(['x', 'y']): plt.tight_layout() for direction, G in mondays.groupby('direction'): plt.tight_layout() train = mondays[mondays.datetime.dt.date < sep_30] models = {} for (x, y, direction), G in train.groupby(['x', 'y', 'direction']): morning_data = G[G.is_morning] afternoon_data = G[~G.is_morning] X = morning_data.pivot(index='date', columns='time', values='congestion').reset_index().drop(columns=['date']) Y = afternoon_data.pivot(index='date', columns='time', values='congestion').reset_index().drop(columns=['date']) model = KNeighborsRegressor() models[x, y, direction] = model.fit(X, Y)
code
90127845/cell_14
[ "image_output_1.png" ]
import datetime import os import pandas as pd root = '/kaggle/input/tabular-playground-series-mar-2022' train_df = pd.read_csv(os.path.join(root, 'train.csv')) train_df['datetime'] = pd.to_datetime(train_df.time) train_df['date'] = train_df.datetime.dt.date train_df['time'] = train_df.datetime.dt.time test_df = pd.read_csv(os.path.join(root, 'test.csv')) test_df['datetime'] = pd.to_datetime(test_df.time) test_df['date'] = test_df.datetime.dt.date test_df['time'] = test_df.datetime.dt.time sep_30 = datetime.date(1991, 9, 30) mondays = train_df[train_df.datetime.dt.dayofweek == 0] mondays['is_morning'] = mondays.datetime.dt.hour < 12 labor_day = datetime.date(1991, 9, 2) memorial_day = datetime.date(1991, 5, 27) mondays = mondays[(mondays.date != labor_day) & (mondays.date != memorial_day)] plt.tight_layout() plt.tight_layout() for (x, y), G in mondays.groupby(['x', 'y']): plt.tight_layout() for direction, G in mondays.groupby('direction'): G.boxplot(by='time', column='congestion', rot=90, figsize=(12, 5)) plt.title(direction) plt.tight_layout() plt.plot()
code
90127845/cell_10
[ "text_html_output_1.png" ]
import datetime import os import pandas as pd root = '/kaggle/input/tabular-playground-series-mar-2022' train_df = pd.read_csv(os.path.join(root, 'train.csv')) train_df['datetime'] = pd.to_datetime(train_df.time) train_df['date'] = train_df.datetime.dt.date train_df['time'] = train_df.datetime.dt.time test_df = pd.read_csv(os.path.join(root, 'test.csv')) test_df['datetime'] = pd.to_datetime(test_df.time) test_df['date'] = test_df.datetime.dt.date test_df['time'] = test_df.datetime.dt.time sep_30 = datetime.date(1991, 9, 30) mondays = train_df[train_df.datetime.dt.dayofweek == 0] mondays['is_morning'] = mondays.datetime.dt.hour < 12 labor_day = datetime.date(1991, 9, 2) memorial_day = datetime.date(1991, 5, 27) mondays = mondays[(mondays.date != labor_day) & (mondays.date != memorial_day)] plt.tight_layout() mondays[mondays.is_morning].groupby('date').congestion.mean().plot(label='Morning') mondays[~mondays.is_morning].groupby('date').congestion.mean().plot(label='Afternoon') plt.title('Congestion by date') plt.ylabel('avg daily congestion') plt.legend() plt.tight_layout() plt.show()
code
90127845/cell_12
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import datetime import os import pandas as pd root = '/kaggle/input/tabular-playground-series-mar-2022' train_df = pd.read_csv(os.path.join(root, 'train.csv')) train_df['datetime'] = pd.to_datetime(train_df.time) train_df['date'] = train_df.datetime.dt.date train_df['time'] = train_df.datetime.dt.time test_df = pd.read_csv(os.path.join(root, 'test.csv')) test_df['datetime'] = pd.to_datetime(test_df.time) test_df['date'] = test_df.datetime.dt.date test_df['time'] = test_df.datetime.dt.time sep_30 = datetime.date(1991, 9, 30) mondays = train_df[train_df.datetime.dt.dayofweek == 0] mondays['is_morning'] = mondays.datetime.dt.hour < 12 labor_day = datetime.date(1991, 9, 2) memorial_day = datetime.date(1991, 5, 27) mondays = mondays[(mondays.date != labor_day) & (mondays.date != memorial_day)] plt.tight_layout() plt.tight_layout() for (x, y), G in mondays.groupby(['x', 'y']): G.boxplot(by='time', column='congestion', rot=90, figsize=(12, 5)) plt.title('{}, {}'.format(x, y)) plt.tight_layout() plt.plot()
code
16154359/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.feature_selection import VarianceThreshold from sklearn.mixture import GaussianMixture import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train['wheezy-copper-turtle-magic'] = train['wheezy-copper-turtle-magic'].astype('category') test['wheezy-copper-turtle-magic'] = test['wheezy-copper-turtle-magic'].astype('category') magicNum = 131073 default_cols = [c for c in train.columns if c not in ['id', 'target', 'target_pred', 'wheezy-copper-turtle-magic']] cols = [c for c in default_cols] sub = pd.read_csv('../input/sample_submission.csv') sub.to_csv('submission.csv', index=False) (train.shape, test.shape) if sub.shape[0] == magicNum: [].shape preds = np.zeros(len(test)) train_err = np.zeros(512) test_err = np.zeros(512) for i in range(512): X = train[train['wheezy-copper-turtle-magic'] == i].copy() Y = X.pop('target').values X_test = test[test['wheezy-copper-turtle-magic'] == i].copy() idx_train = X.index idx_test = X_test.index X.reset_index(drop=True, inplace=True) X = X[cols].values X_test = X_test[cols].values vt = VarianceThreshold(threshold=2).fit(X) X = vt.transform(X) X_test = vt.transform(X_test) X_all = np.concatenate([X, X_test]) train_size = len(X) test1_size = test[:131073][test[:131073]['wheezy-copper-turtle-magic'] == i].shape[0] compo_cnt = 6 for ii in range(30): gmm = GaussianMixture(n_components=compo_cnt, init_params='random', covariance_type='full', max_iter=100, tol=1e-10, reg_covar=0.0001).fit(X_all) labels = gmm.predict(X_all) cntStd = np.std([len(labels[labels == j]) for j in range(compo_cnt)]) if round(cntStd, 4) == 0.4714: check_labels = labels[:train_size] cvt_labels = np.zeros(len(labels)) for iii in range(compo_cnt): mean_val = Y[check_labels == iii].mean() mean_val = 1 if mean_val > 0.5 else 0 cvt_labels[labels == iii] = mean_val train_err[i] = len(Y[Y != cvt_labels[:train_size]]) if train_err[i] >= 10 and train_err[i] <= 15: train_err[i] = 12.5 exp_err = max(0, (25 - train_err[i]) / (train_size + test1_size)) for iii in range(compo_cnt): mean_val = Y[check_labels == iii].mean() mean_val = 1 - exp_err if mean_val > 0.5 else exp_err cvt_labels[labels == iii] = mean_val preds[idx_test] = cvt_labels[train_size:] break sub['target'] = preds sub.to_csv('submission.csv', index=False)
code
16154359/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import roc_auc_score y_perfect = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_flliped = [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1] roc_auc_score(y_perfect, y_flliped) y_preds = [0.33, 0.33, 0.33, 0.5, 0.5, 0, 0, 0, 0, 0, 1, 1, 0.5, 0.5, 1, 1, 1, 0.66, 0.66, 0.66] roc_auc_score(y_flliped, y_preds)
code
16154359/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train['wheezy-copper-turtle-magic'] = train['wheezy-copper-turtle-magic'].astype('category') test['wheezy-copper-turtle-magic'] = test['wheezy-copper-turtle-magic'].astype('category') magicNum = 131073 default_cols = [c for c in train.columns if c not in ['id', 'target', 'target_pred', 'wheezy-copper-turtle-magic']] cols = [c for c in default_cols] sub = pd.read_csv('../input/sample_submission.csv') sub.to_csv('submission.csv', index=False) (train.shape, test.shape)
code
16154359/cell_5
[ "text_plain_output_1.png" ]
from sklearn.metrics import roc_auc_score y_perfect = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_flliped = [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1] roc_auc_score(y_perfect, y_flliped)
code
106204398/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x="blue",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="wifi",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted') g.despine(left=True) g = g.set_ylabels('price_range')
code
106204398/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.describe()
code
106204398/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score from sklearn.model_selection import KFold, train_test_split, cross_val_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x="blue",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="wifi",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="n_cores",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") plt.figure(figsize=(15, 12)) g = sns.heatmap(train.corr(),cmap="BrBG",annot=True, linewidths = 2.0) scaler = StandardScaler() X = scaler.fit_transform(train.drop(['price_range'], axis=1)) y = np.ravel(train[['price_range']]) clf = LogisticRegression(random_state=0).fit(X_train, y_train) clf.score(X_train, y_train) clf.score(X_test, y_test) confusion_matrix(y_test, clf.predict(X_test)) kf = KFold(n_splits=5) kf.get_n_splits(X) training_scores = [] testing_scores = [] for fold, (train_index, test_index) in enumerate(kf.split(X)): X_train = X[train_index] y_train = y[train_index] X_test = X[test_index] y_test = y[test_index] clf = LogisticRegression(random_state=0).fit(X_train, y_train) print(f'Fold {fold + 1} -> The score of the training data set is: ', clf.score(X_train, y_train)) print(f'Fold {fold + 1} -> The score of the testing (out of fold) data set is: ', clf.score(X_test, y_test)) training_scores.append(clf.score(X_train, y_train)) testing_scores.append(clf.score(X_test, y_test)) print('\n') print(f'The average training set accuracy is: {sum(training_scores) / len(training_scores)}') print(f'The average testing set accuracy is: {sum(testing_scores) / len(testing_scores)}')
code
106204398/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any()
code
106204398/cell_19
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score clf = LogisticRegression(random_state=0).fit(X_train, y_train) clf.score(X_train, y_train) clf.score(X_test, y_test) confusion_matrix(y_test, clf.predict(X_test))
code
106204398/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.model_selection import KFold, train_test_split, cross_val_score from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score from sklearn import metrics from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LogisticRegression import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
106204398/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted') g.despine(left=True) g = g.set_ylabels('price_range')
code
106204398/cell_18
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression clf = LogisticRegression(random_state=0).fit(X_train, y_train) clf.score(X_train, y_train) clf.score(X_test, y_test)
code
106204398/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x="blue",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted') g.despine(left=True) g = g.set_ylabels('price_range')
code
106204398/cell_15
[ "text_plain_output_1.png" ]
X_test
code
106204398/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.head()
code
106204398/cell_17
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression clf = LogisticRegression(random_state=0).fit(X_train, y_train) clf.score(X_train, y_train)
code
106204398/cell_14
[ "text_plain_output_1.png" ]
X_train
code
106204398/cell_22
[ "image_output_1.png" ]
from sklearn.model_selection import KFold, train_test_split, cross_val_score from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x="blue",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="wifi",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="n_cores",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") plt.figure(figsize=(15, 12)) g = sns.heatmap(train.corr(),cmap="BrBG",annot=True, linewidths = 2.0) scaler = StandardScaler() X = scaler.fit_transform(train.drop(['price_range'], axis=1)) y = np.ravel(train[['price_range']]) kf = KFold(n_splits=5) kf.get_n_splits(X)
code
106204398/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x="blue",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="wifi",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="n_cores",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") plt.figure(figsize=(15, 12)) g = sns.heatmap(train.corr(), cmap='BrBG', annot=True, linewidths=2.0)
code
106204398/cell_12
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.isna().any() g = sns.catplot(x="blue",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="wifi",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") g = sns.catplot(x="n_cores",y="price_range",data=train, kind = 'bar', height = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("price_range") plt.figure(figsize=(15, 12)) g = sns.heatmap(train.corr(),cmap="BrBG",annot=True, linewidths = 2.0) scaler = StandardScaler() X = scaler.fit_transform(train.drop(['price_range'], axis=1)) y = np.ravel(train[['price_range']]) print(X.shape) print(y.shape)
code
106204398/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') train.info()
code
106211686/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum()
code
106211686/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False)
code
106211686/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.head()
code
106211686/cell_56
[ "text_html_output_1.png" ]
from sklearn import linear_model from sklearn.metrics import explained_variance_score, r2_score from sklearn import linear_model reg = linear_model.LinearRegression() reg.fit(X_train, y_train) reg_pred = reg.predict(X_test) from sklearn.metrics import explained_variance_score, r2_score explained_variance_score(reg_pred, y_test)
code
106211686/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train['country'].value_counts()
code
106211686/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train['store'].unique()
code
106211686/cell_40
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train['Covid'].value_counts()
code
106211686/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train[60000:60005]
code
106211686/cell_48
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train.groupby(train['Covid'])['num_sold'].mean() train.drop('date', axis=1, inplace=True) train.drop('country', axis=1, inplace=True) train.drop('store', axis=1, inplace=True) train.drop('product', axis=1, inplace=True) train
code
106211686/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train.groupby(train['Covid'])['num_sold'].mean()
code
106211686/cell_60
[ "text_plain_output_1.png" ]
from sklearn.metrics import explained_variance_score, r2_score from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeRegressor tree = DecisionTreeRegressor(splitter='random', max_depth=20, max_features='sqrt') tree.fit(X_train, y_train) tree_pred = tree.predict(X_test) print(explained_variance_score(tree_pred, y_test), r2_score(y_test, tree_pred))
code
106211686/cell_52
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train.groupby(train['Covid'])['num_sold'].mean() train.drop('date', axis=1, inplace=True) train.drop('country', axis=1, inplace=True) train.drop('store', axis=1, inplace=True) train.drop('product', axis=1, inplace=True) train.drop('date_time', axis=1, inplace=True) train.drop('row_id', axis=1, inplace=True) train.columns
code
106211686/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
106211686/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train['country'].unique()
code
106211686/cell_51
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train.groupby(train['Covid'])['num_sold'].mean() train.drop('date', axis=1, inplace=True) train.drop('country', axis=1, inplace=True) train.drop('store', axis=1, inplace=True) train.drop('product', axis=1, inplace=True) train.drop('date_time', axis=1, inplace=True) train.drop('row_id', axis=1, inplace=True) train
code
106211686/cell_59
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn import linear_model from sklearn import linear_model from sklearn.metrics import explained_variance_score, r2_score from sklearn import linear_model reg = linear_model.LinearRegression() reg.fit(X_train, y_train) reg_pred = reg.predict(X_test) from sklearn import linear_model ridge = linear_model.Ridge(alpha=0.5) ridge.fit(X_train, y_train) ridge_pred = ridge.predict(X_test) explained_variance_score(ridge_pred, y_test) from sklearn import linear_model lasso = linear_model.Lasso(alpha=0.35) lasso.fit(X_train, y_train) lasso_pred = lasso.predict(X_test) explained_variance_score(lasso_pred, y_test)
code
106211686/cell_58
[ "text_html_output_1.png" ]
from sklearn import linear_model from sklearn import linear_model from sklearn.metrics import explained_variance_score, r2_score from sklearn import linear_model reg = linear_model.LinearRegression() reg.fit(X_train, y_train) reg_pred = reg.predict(X_test) from sklearn import linear_model ridge = linear_model.Ridge(alpha=0.5) ridge.fit(X_train, y_train) ridge_pred = ridge.predict(X_test) explained_variance_score(ridge_pred, y_test)
code
106211686/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train['product'].unique()
code
106211686/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False)
code
106211686/cell_38
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() for x in train[train['year'] == 2020]['month'].index: if train['month'].loc[x] == 1: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 6: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 7: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 8: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 9: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 10: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 11: train['Covid'].loc[x] = 0 elif train['month'].loc[x] == 12: train['Covid'].loc[x] = 0 else: train['Covid'].loc[x] = 1
code
106211686/cell_3
[ "text_plain_output_1.png" ]
import random import random random.seed(10) print(random.random())
code
106211686/cell_46
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['dayofweek'])['num_sold'].mean().sort_values(ascending=False) train.isnull().sum() train.groupby(train['Covid'])['num_sold'].mean() train.head()
code
106211686/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['month'])['num_sold'].mean().sort_values(ascending=False) train.groupby(train['year'])['num_sold'].mean().sort_values(ascending=False)
code
106211686/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.groupby(train['day'])['num_sold'].mean().sort_values(ascending=False)
code
106211686/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) submission = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/sample_submission.csv') train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2022/test.csv') train.info()
code
33106742/cell_25
[ "image_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) number_of_trials = 1000 for i in range(number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) plt.plot(price_series) plt.show()
code
33106742/cell_23
[ "image_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) plt.plot(price_series) plt.show()
code
33106742/cell_30
[ "text_plain_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) number_of_trials = 1000 for i in range(number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points = [] larger_number_of_trials = 9001 for i in range(larger_number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points.append(price_series[-1]) expected_ending_price_point = round(np.mean(ending_price_points), 2) top_ten = np.percentile(ending_price_points, 100 - 10) bottom_ten = np.percentile(ending_price_points, 10) print('Top 10% : ', str(round(top_ten, 2))) print('Bottom 10% : ', str(round(bottom_ten, 2)))
code
33106742/cell_29
[ "text_plain_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) number_of_trials = 1000 for i in range(number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points = [] larger_number_of_trials = 9001 for i in range(larger_number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points.append(price_series[-1]) expected_ending_price_point = round(np.mean(ending_price_points), 2) population_mean = (cagr + 1) * ibm['Adj Close'][-1] print('Sample Mean : ', str(expected_ending_price_point)) print('Population Mean: ', str(round(population_mean, 2))) print('Percent Difference : ', str(round((population_mean - expected_ending_price_point) / population_mean * 100, 2)), '%')
code
33106742/cell_26
[ "image_output_2.png", "image_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) number_of_trials = 1000 for i in range(number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points = [] larger_number_of_trials = 9001 for i in range(larger_number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) plt.plot(price_series) ending_price_points.append(price_series[-1]) plt.show() plt.hist(ending_price_points, bins=50) plt.show()
code
33106742/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import math import matplotlib.pyplot as plt import numpy as np from pandas_datareader import data
code
33106742/cell_28
[ "text_plain_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) number_of_trials = 1000 for i in range(number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points = [] larger_number_of_trials = 9001 for i in range(larger_number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points.append(price_series[-1]) expected_ending_price_point = round(np.mean(ending_price_points), 2) print('Expected Ending Price Point : ', str(expected_ending_price_point))
code
33106742/cell_17
[ "text_plain_output_1.png" ]
from pandas_datareader import data import math ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) print('cagr (mean returns) : ', str(round(cagr, 4))) print('vol (standard deviation of return : )', str(round(vol, 4)))
code
33106742/cell_31
[ "image_output_1.png" ]
from pandas_datareader import data import math import matplotlib.pyplot as plt import numpy as np ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 vol = ibm['Adj Close'].pct_change().std() number_of_trading_days = 252 vol = vol * math.sqrt(number_of_trading_days) daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) number_of_trials = 1000 for i in range(number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points = [] larger_number_of_trials = 9001 for i in range(larger_number_of_trials): daily_return_percentages = np.random.normal(cagr / number_of_trading_days, vol / math.sqrt(number_of_trading_days), number_of_trading_days) + 1 price_series = [ibm['Adj Close'][-1]] for drp in daily_return_percentages: price_series.append(price_series[-1] * drp) ending_price_points.append(price_series[-1]) expected_ending_price_point = round(np.mean(ending_price_points), 2) top_ten = np.percentile(ending_price_points, 100 - 10) bottom_ten = np.percentile(ending_price_points, 10) plt.hist(ending_price_points, bins=100) plt.axvline(top_ten, color='r', linestyle='dashed', linewidth=2) plt.axvline(bottom_ten, color='r', linestyle='dashed', linewidth=2) plt.axhline(ibm['Adj Close'][-1], color='g', linestyle='dashed', linewidth=2) plt.show()
code
33106742/cell_14
[ "text_plain_output_1.png" ]
from pandas_datareader import data ibm = data.DataReader('IBM', 'yahoo', start='1/1/2000') time_elapsed = (ibm.index[-1] - ibm.index[0]).days price_ratio = ibm['Adj Close'][-1] / ibm['Adj Close'][1] inverse_number_of_years = 365.0 / time_elapsed cagr = price_ratio ** inverse_number_of_years - 1 print(cagr)
code
2033418/cell_2
[ "text_plain_output_1.png" ]
from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer import nltk import numpy as np import pandas as pd import re import tensorflow as tf import re import numpy as np import pandas as pd import nltk from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer import tensorflow as tf data = pd.read_csv('../input/TechCrunch.csv', sep=',', error_bad_lines=False, encoding='ISO-8859-1') data['title'] = data['title'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x)) data['url'] = data['url'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x)) def combineProperNouns(a): y = 0 while y <= len(a) - 2: if a[y][0].isupper() == True and a[y + 1][0].isupper() == True: a[y] = str(a[y]) + '+' + str(a[y + 1]) a[y + 1:] = a[y + 2:] else: y = y + 1 return a def recreateDataWithCombinedProperNouns(data): tempData = [] for x in data.split('.'): tempPhrase = [] for y in x.split(','): z = y.split(' ') z = [a for a in z if len(a) > 0] tempPhrase.append(' '.join(combineProperNouns(z))) tempData.append(','.join(tempPhrase)) data = '.'.join(tempData) return data def removeDotsFromAcronyms(data): counter = 0 while counter < len(data) - 2: if data[counter] == '.' and data[counter + 2] == '.': data = data[:counter] + str(data[counter + 1]) + ' ' + data[counter + 3:] counter = counter + 1 elif data[counter] == '.' and data[counter - 1].isupper() == True: data = data[:counter] + data[counter + 1:] else: counter = counter + 1 return data def stemAndLemmatize(data, columnNames): wordnet_lemmatizer = WordNetLemmatizer() porter_stemmer = PorterStemmer() for columnName in columnNames: data[columnName] = data[columnName].map(lambda x: ' '.join([porter_stemmer.stem(y) for y in x.split(' ')])) data[columnName] = data[columnName].map(lambda x: ' '.join([wordnet_lemmatizer.lemmatize(y) for y in x.split(' ')])) return data data['newTitle'] = data['title'].map(lambda x: recreateDataWithCombinedProperNouns(x)) data = stemAndLemmatize(data, ['title']) data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] not in ['DT', 'IN', 'PDT', 'TO']])) data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if len(y) > 1])) tagList = ['NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG'] data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] in tagList])) wordList = set([y for x in data['newTitle'].values for y in x.split(' ')]) print('The number of words are {}'.format(len(wordList))) vocab_size = len(wordList) word2int = {} int2word = {} for i, word in enumerate(wordList): word2int[word] = i int2word[word] = i words = [] WINDOW_SIZE = 2 for sentence in data['newTitle'].values: newSentence = sentence.split(' ') for word_index, word in enumerate(newSentence): for nb_word in newSentence[max(word_index - WINDOW_SIZE, 0):min(word_index + WINDOW_SIZE, len(newSentence)) + 1]: if nb_word != word: words.append([word, nb_word]) def to_one_hot(data_point_index, vocab_size): temp = np.zeros(vocab_size) temp[data_point_index] = 1 return temp x_train = [] y_train = [] for data_word in words: x_train.append(to_one_hot(word2int[data_word[0]], vocab_size)) y_train.append(to_one_hot(word2int[data_word[1]], vocab_size)) x_train = np.asarray(x_train) y_train = np.asarray(y_train) x = tf.placeholder(tf.float32, shape=(None, vocab_size)) y_label = tf.placeholder(tf.float32, shape=(None, vocab_size)) EMBEDDING_DIM = 5 W1 = tf.Variable(tf.random_normal([vocab_size, EMBEDDING_DIM])) b1 = tf.Variable(tf.random_normal([EMBEDDING_DIM])) hidden_representation = tf.add(tf.matmul(x, W1), b1) W2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, vocab_size])) b2 = tf.Variable(tf.random_normal([vocab_size])) prediction = tf.nn.softmax(tf.add(tf.matmul(hidden_representation, W2), b2))
code
2033418/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer import nltk import pandas as pd import re import re import numpy as np import pandas as pd import nltk from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer import tensorflow as tf data = pd.read_csv('../input/TechCrunch.csv', sep=',', error_bad_lines=False, encoding='ISO-8859-1') data['title'] = data['title'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x)) data['url'] = data['url'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x)) def combineProperNouns(a): y = 0 while y <= len(a) - 2: if a[y][0].isupper() == True and a[y + 1][0].isupper() == True: a[y] = str(a[y]) + '+' + str(a[y + 1]) a[y + 1:] = a[y + 2:] else: y = y + 1 return a def recreateDataWithCombinedProperNouns(data): tempData = [] for x in data.split('.'): tempPhrase = [] for y in x.split(','): z = y.split(' ') z = [a for a in z if len(a) > 0] tempPhrase.append(' '.join(combineProperNouns(z))) tempData.append(','.join(tempPhrase)) data = '.'.join(tempData) return data def removeDotsFromAcronyms(data): counter = 0 while counter < len(data) - 2: if data[counter] == '.' and data[counter + 2] == '.': data = data[:counter] + str(data[counter + 1]) + ' ' + data[counter + 3:] counter = counter + 1 elif data[counter] == '.' and data[counter - 1].isupper() == True: data = data[:counter] + data[counter + 1:] else: counter = counter + 1 return data def stemAndLemmatize(data, columnNames): wordnet_lemmatizer = WordNetLemmatizer() porter_stemmer = PorterStemmer() for columnName in columnNames: data[columnName] = data[columnName].map(lambda x: ' '.join([porter_stemmer.stem(y) for y in x.split(' ')])) data[columnName] = data[columnName].map(lambda x: ' '.join([wordnet_lemmatizer.lemmatize(y) for y in x.split(' ')])) return data data['newTitle'] = data['title'].map(lambda x: recreateDataWithCombinedProperNouns(x)) data = stemAndLemmatize(data, ['title']) data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] not in ['DT', 'IN', 'PDT', 'TO']])) data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if len(y) > 1]))
code
2033418/cell_3
[ "text_plain_output_1.png" ]
from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer import nltk import numpy as np import pandas as pd import re import tensorflow as tf import re import numpy as np import pandas as pd import nltk from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer import tensorflow as tf data = pd.read_csv('../input/TechCrunch.csv', sep=',', error_bad_lines=False, encoding='ISO-8859-1') data['title'] = data['title'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x)) data['url'] = data['url'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x)) def combineProperNouns(a): y = 0 while y <= len(a) - 2: if a[y][0].isupper() == True and a[y + 1][0].isupper() == True: a[y] = str(a[y]) + '+' + str(a[y + 1]) a[y + 1:] = a[y + 2:] else: y = y + 1 return a def recreateDataWithCombinedProperNouns(data): tempData = [] for x in data.split('.'): tempPhrase = [] for y in x.split(','): z = y.split(' ') z = [a for a in z if len(a) > 0] tempPhrase.append(' '.join(combineProperNouns(z))) tempData.append(','.join(tempPhrase)) data = '.'.join(tempData) return data def removeDotsFromAcronyms(data): counter = 0 while counter < len(data) - 2: if data[counter] == '.' and data[counter + 2] == '.': data = data[:counter] + str(data[counter + 1]) + ' ' + data[counter + 3:] counter = counter + 1 elif data[counter] == '.' and data[counter - 1].isupper() == True: data = data[:counter] + data[counter + 1:] else: counter = counter + 1 return data def stemAndLemmatize(data, columnNames): wordnet_lemmatizer = WordNetLemmatizer() porter_stemmer = PorterStemmer() for columnName in columnNames: data[columnName] = data[columnName].map(lambda x: ' '.join([porter_stemmer.stem(y) for y in x.split(' ')])) data[columnName] = data[columnName].map(lambda x: ' '.join([wordnet_lemmatizer.lemmatize(y) for y in x.split(' ')])) return data data['newTitle'] = data['title'].map(lambda x: recreateDataWithCombinedProperNouns(x)) data = stemAndLemmatize(data, ['title']) data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] not in ['DT', 'IN', 'PDT', 'TO']])) data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if len(y) > 1])) tagList = ['NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG'] data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] in tagList])) wordList = set([y for x in data['newTitle'].values for y in x.split(' ')]) vocab_size = len(wordList) word2int = {} int2word = {} for i, word in enumerate(wordList): word2int[word] = i int2word[word] = i words = [] WINDOW_SIZE = 2 for sentence in data['newTitle'].values: newSentence = sentence.split(' ') for word_index, word in enumerate(newSentence): for nb_word in newSentence[max(word_index - WINDOW_SIZE, 0):min(word_index + WINDOW_SIZE, len(newSentence)) + 1]: if nb_word != word: words.append([word, nb_word]) def to_one_hot(data_point_index, vocab_size): temp = np.zeros(vocab_size) temp[data_point_index] = 1 return temp x_train = [] y_train = [] for data_word in words: x_train.append(to_one_hot(word2int[data_word[0]], vocab_size)) y_train.append(to_one_hot(word2int[data_word[1]], vocab_size)) x_train = np.asarray(x_train) y_train = np.asarray(y_train) x = tf.placeholder(tf.float32, shape=(None, vocab_size)) y_label = tf.placeholder(tf.float32, shape=(None, vocab_size)) EMBEDDING_DIM = 5 W1 = tf.Variable(tf.random_normal([vocab_size, EMBEDDING_DIM])) b1 = tf.Variable(tf.random_normal([EMBEDDING_DIM])) hidden_representation = tf.add(tf.matmul(x, W1), b1) W2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, vocab_size])) b2 = tf.Variable(tf.random_normal([vocab_size])) prediction = tf.nn.softmax(tf.add(tf.matmul(hidden_representation, W2), b2)) sess = tf.Session() init = tf.global_variables_initializer() sess.run(init) cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(prediction), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy_loss) n_iters = 10 print('We will start training now') for _ in range(n_iters): sess.run(train_step, feed_dict={x: x_train, y_label: y_train}) print('loss is : ', sess.run(cross_entropy_loss, feed_dict={x: x_train, y_label: y_train}))
code
2033418/cell_5
[ "image_output_2.png", "image_output_1.png" ]
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from sklearn import preprocessing
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import re
import tensorflow as tf
import re
import numpy as np
import pandas as pd
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
import tensorflow as tf
data = pd.read_csv('../input/TechCrunch.csv', sep=',', error_bad_lines=False, encoding='ISO-8859-1')
data['title'] = data['title'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x))
data['url'] = data['url'].map(lambda x: re.sub('[^\\x00-\\x7F]+', ' ', x))

def combineProperNouns(a):
    y = 0
    while y <= len(a) - 2:
        if a[y][0].isupper() == True and a[y + 1][0].isupper() == True:
            a[y] = str(a[y]) + '+' + str(a[y + 1])
            a[y + 1:] = a[y + 2:]
        else:
            y = y + 1
    return a

def recreateDataWithCombinedProperNouns(data):
    tempData = []
    for x in data.split('.'):
        tempPhrase = []
        for y in x.split(','):
            z = y.split(' ')
            z = [a for a in z if len(a) > 0]
            tempPhrase.append(' '.join(combineProperNouns(z)))
        tempData.append(','.join(tempPhrase))
    data = '.'.join(tempData)
    return data

def removeDotsFromAcronyms(data):
    counter = 0
    while counter < len(data) - 2:
        if data[counter] == '.' and data[counter + 2] == '.':
            data = data[:counter] + str(data[counter + 1]) + ' ' + data[counter + 3:]
            counter = counter + 1
        elif data[counter] == '.' and data[counter - 1].isupper() == True:
            data = data[:counter] + data[counter + 1:]
        else:
            counter = counter + 1
    return data

def stemAndLemmatize(data, columnNames):
    wordnet_lemmatizer = WordNetLemmatizer()
    porter_stemmer = PorterStemmer()
    for columnName in columnNames:
        data[columnName] = data[columnName].map(lambda x: ' '.join([porter_stemmer.stem(y) for y in x.split(' ')]))
        data[columnName] = data[columnName].map(lambda x: ' '.join([wordnet_lemmatizer.lemmatize(y) for y in x.split(' ')]))
    return data

data['newTitle'] = data['title'].map(lambda x: recreateDataWithCombinedProperNouns(x))
data = stemAndLemmatize(data, ['title'])
data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] not in ['DT', 'IN', 'PDT', 'TO']]))
data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if len(y) > 1]))
tagList = ['NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG']
data['newTitle'] = data['newTitle'].map(lambda x: ' '.join([y for y in x.split(' ') if nltk.pos_tag(y.split())[0][1] in tagList]))
wordList = set([y for x in data['newTitle'].values for y in x.split(' ')])
vocab_size = len(wordList)
word2int = {}
int2word = {}
for i, word in enumerate(wordList):
    word2int[word] = i
    int2word[word] = i
words = []
WINDOW_SIZE = 2
for sentence in data['newTitle'].values:
    newSentence = sentence.split(' ')
    for word_index, word in enumerate(newSentence):
        for nb_word in newSentence[max(word_index - WINDOW_SIZE, 0):min(word_index + WINDOW_SIZE, len(newSentence)) + 1]:
            if nb_word != word:
                words.append([word, nb_word])

def to_one_hot(data_point_index, vocab_size):
    temp = np.zeros(vocab_size)
    temp[data_point_index] = 1
    return temp

x_train = []
y_train = []
for data_word in words:
    x_train.append(to_one_hot(word2int[data_word[0]], vocab_size))
    y_train.append(to_one_hot(word2int[data_word[1]], vocab_size))
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
x = tf.placeholder(tf.float32, shape=(None, vocab_size))
y_label = tf.placeholder(tf.float32, shape=(None, vocab_size))
EMBEDDING_DIM = 5
W1 = tf.Variable(tf.random_normal([vocab_size, EMBEDDING_DIM]))
b1 = tf.Variable(tf.random_normal([EMBEDDING_DIM]))
hidden_representation = tf.add(tf.matmul(x, W1), b1)
W2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, vocab_size]))
b2 = tf.Variable(tf.random_normal([vocab_size]))
prediction = tf.nn.softmax(tf.add(tf.matmul(hidden_representation, W2), b2))
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy_loss)
n_iters = 10
for _ in range(n_iters):
    sess.run(train_step, feed_dict={x: x_train, y_label: y_train})
vectors = sess.run(W1 + b1)
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0)
vectors = model.fit_transform(vectors)
from sklearn import preprocessing
normalizer = preprocessing.Normalizer()
vectors = normalizer.fit_transform(vectors, 'l2')
wordList = list(set([y for x in data['newTitle'].values for y in x.split(' ')]))
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(10, 5))
for word in wordList[0:100]:
    ax.annotate(word, (vectors[word2int[word]][0], vectors[word2int[word]][1]))
plt.show()
fig, ax = plt.subplots(figsize=(10, 5))
for word in wordList[100:200]:
    ax.annotate(word, (vectors[word2int[word]][0], vectors[word2int[word]][1]))
plt.show()
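# Illustrative sanity check of the skip-gram pair construction above (the toy
# sentence is made up and is not from the TechCrunch data); it mirrors the same
# window slicing used on data['newTitle'] with WINDOW_SIZE = 2.
toy_sentence = 'uber raises funding round'.split(' ')
toy_pairs = []
for i, w in enumerate(toy_sentence):
    for nb in toy_sentence[max(i - WINDOW_SIZE, 0):min(i + WINDOW_SIZE, len(toy_sentence)) + 1]:
        if nb != w:
            toy_pairs.append([w, nb])
print(toy_pairs)  # e.g. [['uber', 'raises'], ['uber', 'funding'], ...]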
code
16147265/cell_1
[ "text_plain_output_1.png" ]
import os
import os
print(os.listdir('../input'))
code
16147265/cell_8
[ "text_plain_output_1.png" ]
print('End')
code
16147265/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train_df = pd.read_csv('../input/train_data.csv')
test_df = pd.read_csv('../input/test_data.csv')
code
121149609/cell_13
[ "text_plain_output_1.png" ]
from PIL import Image
from collections import Counter
from torch.utils.data import DataLoader, Dataset
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import pandas as pd
import spacy
import torch
import torchvision.transforms as T
import pandas as pd
caption_file = data_location + '/captions.txt'
df = pd.read_csv(caption_file)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
data_idx = 56
image_path = data_location + '/Images/' + df.iloc[data_idx, 0]
img = mpimg.imread(image_path)
spacy_eng = spacy.load('en')
text = 'This is a good place to find a city'
[token.text.lower() for token in spacy_eng.tokenizer(text)]

class Vocabulary:

    def __init__(self, freq_threshold):
        self.itos = {0: '<PAD>', 1: '<SOS>', 2: '<EOS>', 3: '<UNK>'}
        self.stoi = {v: k for k, v in self.itos.items()}
        self.freq_threshold = freq_threshold

    def __len__(self):
        return len(self.itos)

    @staticmethod
    def tokenize(text):
        return [token.text.lower() for token in spacy_eng.tokenizer(text)]

    def build_vocab(self, sentence_list):
        frequencies = Counter()
        idx = 4
        for sentence in sentence_list:
            for word in self.tokenize(sentence):
                frequencies[word] += 1
                if frequencies[word] == self.freq_threshold:
                    self.stoi[word] = idx
                    self.itos[idx] = word
                    idx += 1

    def numericalize(self, text):
        """ For each word in the text corresponding index token for that word form the vocab built as list """
        tokenized_text = self.tokenize(text)
        return [self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in tokenized_text]

class FlickrDataset(Dataset):
    """ FlickrDataset """

    def __init__(self, root_dir, captions_file, transform=None, freq_threshold=5):
        self.root_dir = root_dir
        self.df = pd.read_csv(caption_file)
        self.transform = transform
        self.imgs = self.df['image']
        self.captions = self.df['caption']
        self.vocab = Vocabulary(freq_threshold)
        self.vocab.build_vocab(self.captions.tolist())

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        caption = self.captions[idx]
        img_name = self.imgs[idx]
        img_location = os.path.join(self.root_dir, img_name)
        img = Image.open(img_location).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        caption_vec = []
        caption_vec += [self.vocab.stoi['<SOS>']]
        caption_vec += self.vocab.numericalize(caption)
        caption_vec += [self.vocab.stoi['<EOS>']]
        return (img, torch.tensor(caption_vec))

transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])

def show_image(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    plt.pause(0.001)

dataset = FlickrDataset(root_dir=data_location + '/Images', captions_file=data_location + '/captions.txt', transform=transforms)
img, caps = dataset[80]
show_image(img, 'Image')
print('Token:', caps)
print('Sentence:')
print([dataset.vocab.itos[token] for token in caps.tolist()])
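# Note on the vocabulary behaviour above: with freq_threshold=5, only words seen at
# least 5 times across all captions receive their own index; rarer tokens fall back to
# '<UNK>' (index 3), and every caption vector is wrapped as <SOS> ... <EOS>.
# Illustrative shape of a numericalized caption (indices are hypothetical):
#   dataset.vocab.numericalize('a dog runs') -> [stoi['a'], stoi['dog'], stoi['runs']]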
code
121149609/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
v = Vocabulary(freq_threshold=1)
v.build_vocab(['This is a good place to find a city'])
print(v.stoi)
print(v.numericalize('This is a good place to find a city here!!'))
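# Expected behaviour (not an exact output): with freq_threshold=1 every lower-cased token
# of the build sentence gets an index, so the second print maps the known words to those
# indices while unseen tokens such as 'here' and '!!' fall back to the '<UNK>' index (3).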
code
121149609/cell_4
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
caption_file = data_location + '/captions.txt'
df = pd.read_csv(caption_file)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
data_idx = 56
image_path = data_location + '/Images/' + df.iloc[data_idx, 0]
img = mpimg.imread(image_path)
plt.imshow(img)
plt.show()
for i in range(data_idx, data_idx + 5):
    print('Caption:', df.iloc[i, 1])
code
121149609/cell_2
[ "text_plain_output_1.png" ]
#location of the data
data_location = "../input/flickr8k"
!ls $data_location
code
121149609/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image
from collections import Counter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from torch.utils.data import DataLoader, Dataset
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import pandas as pd
import spacy
import torch
import torch
import pandas as pd
caption_file = data_location + '/captions.txt'
df = pd.read_csv(caption_file)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
data_idx = 56
image_path = data_location + '/Images/' + df.iloc[data_idx, 0]
img = mpimg.imread(image_path)
spacy_eng = spacy.load('en')
text = 'This is a good place to find a city'
[token.text.lower() for token in spacy_eng.tokenizer(text)]

class Vocabulary:

    def __init__(self, freq_threshold):
        self.itos = {0: '<PAD>', 1: '<SOS>', 2: '<EOS>', 3: '<UNK>'}
        self.stoi = {v: k for k, v in self.itos.items()}
        self.freq_threshold = freq_threshold

    def __len__(self):
        return len(self.itos)

    @staticmethod
    def tokenize(text):
        return [token.text.lower() for token in spacy_eng.tokenizer(text)]

    def build_vocab(self, sentence_list):
        frequencies = Counter()
        idx = 4
        for sentence in sentence_list:
            for word in self.tokenize(sentence):
                frequencies[word] += 1
                if frequencies[word] == self.freq_threshold:
                    self.stoi[word] = idx
                    self.itos[idx] = word
                    idx += 1

    def numericalize(self, text):
        """ For each word in the text corresponding index token for that word form the vocab built as list """
        tokenized_text = self.tokenize(text)
        return [self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in tokenized_text]

class FlickrDataset(Dataset):
    """ FlickrDataset """

    def __init__(self, root_dir, captions_file, transform=None, freq_threshold=5):
        self.root_dir = root_dir
        self.df = pd.read_csv(caption_file)
        self.transform = transform
        self.imgs = self.df['image']
        self.captions = self.df['caption']
        self.vocab = Vocabulary(freq_threshold)
        self.vocab.build_vocab(self.captions.tolist())

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        caption = self.captions[idx]
        img_name = self.imgs[idx]
        img_location = os.path.join(self.root_dir, img_name)
        img = Image.open(img_location).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        caption_vec = []
        caption_vec += [self.vocab.stoi['<SOS>']]
        caption_vec += self.vocab.numericalize(caption)
        caption_vec += [self.vocab.stoi['<EOS>']]
        return (img, torch.tensor(caption_vec))

class CapsCollate:
    """ Collate to apply the padding to the captions with dataloader """

    def __init__(self, pad_idx, batch_first=False):
        self.pad_idx = pad_idx
        self.batch_first = batch_first

    def __call__(self, batch):
        imgs = [item[0].unsqueeze(0) for item in batch]
        imgs = torch.cat(imgs, dim=0)
        targets = [item[1] for item in batch]
        targets = pad_sequence(targets, batch_first=self.batch_first, padding_value=self.pad_idx)
        return (imgs, targets)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
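# Small, self-contained sketch of what CapsCollate does to the variable-length caption
# tensors in a batch (toy values, not real token ids):
_toy = [torch.tensor([1, 5, 2]), torch.tensor([1, 7, 8, 9, 2])]
print(pad_sequence(_toy, batch_first=True, padding_value=0))
# tensor([[1, 5, 2, 0, 0],
#         [1, 7, 8, 9, 2]])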
code
121149609/cell_7
[ "text_plain_output_1.png" ]
import spacy
spacy_eng = spacy.load('en')
text = 'This is a good place to find a city'
[token.text.lower() for token in spacy_eng.tokenizer(text)]
code
121149609/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from PIL import Image
from collections import Counter
from torch.utils.data import DataLoader, Dataset
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import pandas as pd
import spacy
import torch
import torchvision.transforms as T
import pandas as pd
caption_file = data_location + '/captions.txt'
df = pd.read_csv(caption_file)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
data_idx = 56
image_path = data_location + '/Images/' + df.iloc[data_idx, 0]
img = mpimg.imread(image_path)
spacy_eng = spacy.load('en')
text = 'This is a good place to find a city'
[token.text.lower() for token in spacy_eng.tokenizer(text)]

class Vocabulary:

    def __init__(self, freq_threshold):
        self.itos = {0: '<PAD>', 1: '<SOS>', 2: '<EOS>', 3: '<UNK>'}
        self.stoi = {v: k for k, v in self.itos.items()}
        self.freq_threshold = freq_threshold

    def __len__(self):
        return len(self.itos)

    @staticmethod
    def tokenize(text):
        return [token.text.lower() for token in spacy_eng.tokenizer(text)]

    def build_vocab(self, sentence_list):
        frequencies = Counter()
        idx = 4
        for sentence in sentence_list:
            for word in self.tokenize(sentence):
                frequencies[word] += 1
                if frequencies[word] == self.freq_threshold:
                    self.stoi[word] = idx
                    self.itos[idx] = word
                    idx += 1

    def numericalize(self, text):
        """ For each word in the text corresponding index token for that word form the vocab built as list """
        tokenized_text = self.tokenize(text)
        return [self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in tokenized_text]

class FlickrDataset(Dataset):
    """ FlickrDataset """

    def __init__(self, root_dir, captions_file, transform=None, freq_threshold=5):
        self.root_dir = root_dir
        self.df = pd.read_csv(caption_file)
        self.transform = transform
        self.imgs = self.df['image']
        self.captions = self.df['caption']
        self.vocab = Vocabulary(freq_threshold)
        self.vocab.build_vocab(self.captions.tolist())

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        caption = self.captions[idx]
        img_name = self.imgs[idx]
        img_location = os.path.join(self.root_dir, img_name)
        img = Image.open(img_location).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        caption_vec = []
        caption_vec += [self.vocab.stoi['<SOS>']]
        caption_vec += self.vocab.numericalize(caption)
        caption_vec += [self.vocab.stoi['<EOS>']]
        return (img, torch.tensor(caption_vec))

transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])

def show_image(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    plt.pause(0.001)

dataset = FlickrDataset(root_dir=data_location + '/Images', captions_file=data_location + '/captions.txt', transform=transforms)
img, caps = dataset[80]
BATCH_SIZE = 4
NUM_WORKER = 1
pad_idx = dataset.vocab.stoi['<PAD>']
data_loader = DataLoader(dataset=dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKER, shuffle=True, collate_fn=CapsCollate(pad_idx=pad_idx, batch_first=True))
dataiter = iter(data_loader)
batch = next(dataiter)
images, captions = batch
for i in range(BATCH_SIZE):
    img, cap = (images[i], captions[i])
    caption_label = [dataset.vocab.itos[token] for token in cap.tolist()]
    eos_index = caption_label.index('<EOS>')
    caption_label = caption_label[1:eos_index]
    caption_label = ' '.join(caption_label)
    show_image(img, caption_label)
    plt.show()
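# Expected batch shapes here (given the 224x224 resize and batch_first=True padding):
#   images:   torch.Size([4, 3, 224, 224])
#   captions: torch.Size([4, length_of_longest_caption_in_batch])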
code
121149609/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd
caption_file = data_location + '/captions.txt'
df = pd.read_csv(caption_file)
print('There are {} image to captions'.format(len(df)))
df.head(7)
code
105214040/cell_29
[ "image_output_1.png" ]
import numpy as np
m = 100
vx = 10
vy = 10
mx = np.array(vx + np.random.randn(m))
my = np.array(vy + np.random.randn(m))
measurements = np.vstack((mx, my))
dt = 0.1
I = np.eye(4)
x = np.matrix([[0.0, 0.0, 0.0, 0.0]]).T
P = np.diag([1000.0, 1000.0, 1000.0, 1000.0])
A = np.matrix([[1.0, 0.0, dt, 0.0], [0.0, 1.0, 0.0, dt], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
H = np.matrix([[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
r = 100.0
R = np.matrix([[r, 0.0], [0.0, r]])
s = 8.8
G = np.matrix([[0.5 * dt ** 2], [0.5 * dt ** 2], [dt], [dt]])
Q = G * G.T * s ** 2
xt = []
yt = []
dxt = []
dyt = []
Zx = []
Zy = []
Px = []
Py = []
Pdx = []
Pdy = []
Rdx = []
Rdy = []
Kx = []
Ky = []
Kdx = []
Kdy = []
for n in range(len(measurements[0])):
    x = A * x
    P = A * P * A.T + Q
    S = H * P * H.T + R
    K = P * H.T * np.linalg.pinv(S)
    Z = measurements[:, n].reshape(2, 1)
    y = Z - H * x
    x = x + K * y
    P = (I - K * H) * P
    xt.append(float(x[0]))
    yt.append(float(x[1]))
    dxt.append(float(x[2]))
    dyt.append(float(x[3]))
    Zx.append(float(Z[0]))
    Zy.append(float(Z[1]))
    Px.append(float(P[0, 0]))
    Py.append(float(P[1, 1]))
    Pdx.append(float(P[2, 2]))
    Pdy.append(float(P[3, 3]))
    Rdx.append(float(R[0, 0]))
    Rdy.append(float(R[1, 1]))
    Kx.append(float(K[0, 0]))
    Ky.append(float(K[1, 0]))
    Kdx.append(float(K[2, 0]))
    Kdy.append(float(K[3, 0]))

def plot_K():
    fig = plt.figure(figsize=(16,9))
    plt.plot(range(len(measurements[0])),Kx, label='Kalman Gain for $x$')
    plt.plot(range(len(measurements[0])),Ky, label='Kalman Gain for $y$')
    plt.plot(range(len(measurements[0])),Kdx, label='Kalman Gain for $\dot x$')
    plt.plot(range(len(measurements[0])),Kdy, label='Kalman Gain for $\dot y$')
    plt.xlabel('Filter Step')
    plt.ylabel('')
    plt.title('Kalman Gain (the lower, the more the measurement fullfill the prediction)')
    plt.legend(loc='best',prop={'size':22})

plot_K()
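# The loop above is the standard linear Kalman filter written in matrix form:
#   predict:  x = A x,                 P = A P A^T + Q
#   update:   K = P H^T (H P H^T + R)^-1,  x = x + K (z - H x),  P = (I - K H) P
# Only velocity is measured (H selects the last two states), so the position
# estimate comes entirely from integrating the filtered velocity.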
code
105214040/cell_18
[ "image_output_1.png" ]
import numpy as np
m = 100
vx = 10
vy = 10
mx = np.array(vx + np.random.randn(m))
my = np.array(vy + np.random.randn(m))
measurements = np.vstack((mx, my))
plt.figure(figsize=(10, 7))
plt.plot(range(m), mx, label='$v_1 (measurements)$')
plt.plot(range(m), my, label='$v_2 (measurements)$')
plt.ylabel('Velocity Measurements')
plt.title('Noisy Measurements')
plt.legend(loc='best', prop={'size': 15})
plt.show()
code
105214040/cell_16
[ "text_plain_output_1.png" ]
import numpy as np
m = 100
vx = 10
vy = 10
mx = np.array(vx + np.random.randn(m))
my = np.array(vy + np.random.randn(m))
measurements = np.vstack((mx, my))
measurements
code
105214040/cell_31
[ "image_output_2.png", "image_output_1.png" ]
import numpy as np
m = 100
vx = 10
vy = 10
mx = np.array(vx + np.random.randn(m))
my = np.array(vy + np.random.randn(m))
measurements = np.vstack((mx, my))
dt = 0.1
I = np.eye(4)
x = np.matrix([[0.0, 0.0, 0.0, 0.0]]).T
P = np.diag([1000.0, 1000.0, 1000.0, 1000.0])
A = np.matrix([[1.0, 0.0, dt, 0.0], [0.0, 1.0, 0.0, dt], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
H = np.matrix([[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
r = 100.0
R = np.matrix([[r, 0.0], [0.0, r]])
s = 8.8
G = np.matrix([[0.5 * dt ** 2], [0.5 * dt ** 2], [dt], [dt]])
Q = G * G.T * s ** 2
xt = []
yt = []
dxt = []
dyt = []
Zx = []
Zy = []
Px = []
Py = []
Pdx = []
Pdy = []
Rdx = []
Rdy = []
Kx = []
Ky = []
Kdx = []
Kdy = []
for n in range(len(measurements[0])):
    x = A * x
    P = A * P * A.T + Q
    S = H * P * H.T + R
    K = P * H.T * np.linalg.pinv(S)
    Z = measurements[:, n].reshape(2, 1)
    y = Z - H * x
    x = x + K * y
    P = (I - K * H) * P
    xt.append(float(x[0]))
    yt.append(float(x[1]))
    dxt.append(float(x[2]))
    dyt.append(float(x[3]))
    Zx.append(float(Z[0]))
    Zy.append(float(Z[1]))
    Px.append(float(P[0, 0]))
    Py.append(float(P[1, 1]))
    Pdx.append(float(P[2, 2]))
    Pdy.append(float(P[3, 3]))
    Rdx.append(float(R[0, 0]))
    Rdy.append(float(R[1, 1]))
    Kx.append(float(K[0, 0]))
    Ky.append(float(K[1, 0]))
    Kdx.append(float(K[2, 0]))
    Kdy.append(float(K[3, 0]))

def plot_K():
    fig = plt.figure(figsize=(16,9))
    plt.plot(range(len(measurements[0])),Kx, label='Kalman Gain for $x$')
    plt.plot(range(len(measurements[0])),Ky, label='Kalman Gain for $y$')
    plt.plot(range(len(measurements[0])),Kdx, label='Kalman Gain for $\dot x$')
    plt.plot(range(len(measurements[0])),Kdy, label='Kalman Gain for $\dot y$')
    plt.xlabel('Filter Step')
    plt.ylabel('')
    plt.title('Kalman Gain (the lower, the more the measurement fullfill the prediction)')
    plt.legend(loc='best',prop={'size':22})

plt.figure(figsize=(10, 7))
plt.plot(range(len(measurements[0])), dxt, label='$v_1$', c='r')
plt.plot(range(len(measurements[0])), dyt, label='$v_2$', c='r')
plt.plot(range(len(measurements[0])), mx, label='$z_1 (measurement)$', c='g')
plt.plot(range(len(measurements[0])), my, label='$z_2 (measurement)$', c='b')
plt.axhline(vx, color='#999999', label='$v_1(real)$')
plt.axhline(vy, color='#999999', label='$v_2(real)$')
plt.title('Estimates of Velocity')
plt.legend(loc='best')
plt.ylim([0, 20])
plt.show()
plt.figure(figsize=(10, 7))
plt.scatter(xt, yt, s=20, label='State', c='black')
plt.scatter(xt[0], yt[0], s=100, label='Start', c='g')
plt.scatter(xt[-1], yt[-1], s=100, label='Goal', c='r')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.title('Estimates of Position (Tracking)')
plt.legend(loc='best')
plt.show()
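# Quick, illustrative accuracy check (not part of the original analysis): compare the
# filtered velocity traces against the known true values vx = vy = 10.
rmse_vx = np.sqrt(np.mean((np.array(dxt) - vx) ** 2))
rmse_vy = np.sqrt(np.mean((np.array(dyt) - vy) ** 2))
print('velocity RMSE:', rmse_vx, rmse_vy)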
code
50223492/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
df.head(2)
code
50223492/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.head()
code
50223492/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
for items in df.columns[-3:]:
    sns.barplot(x=df['gender'], y=df[items])
    plt.show()
code
50223492/cell_19
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
df['total'] = df.math + df.reading + df.writing
df['precent'] = df['total'] / 300 * 100
df.sort_values(by='precent', ascending=False).head(10)
code
50223492/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50223492/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
df['total'] = df.math + df.reading + df.writing
df['precent'] = df['total'] / 300 * 100
df.head()
code
50223492/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
df['total'] = df.math + df.reading + df.writing
df['precent'] = df['total'] / 300 * 100
df.sort_values(by='precent', ascending=False).head(10)
passed_ds = df[(df.math > 60) & (df.reading > 60) & (df.writing > 60)]
len(passed_ds)
passedPrecentage = len(passed_ds) / len(df) * 100
passedPrecentage
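# Equivalent one-liner for the same pass rate (illustrative alternative, not used above):
# ((df.math > 60) & (df.reading > 60) & (df.writing > 60)).mean() * 100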
code
50223492/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
for items in df.columns[-3:]:
    sns.barplot(x=df['race'], y=df[items])
    plt.show()
code
50223492/cell_22
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'p_education', 'test preparation course': 'pre', 'math score': 'math', 'reading score': 'reading', 'writing score': 'writing'}, inplace=True)
df['total'] = df.math + df.reading + df.writing
df['precent'] = df['total'] / 300 * 100
df.sort_values(by='precent', ascending=False).head(10)
passed_ds = df[(df.math > 60) & (df.reading > 60) & (df.writing > 60)]
len(passed_ds)
code
50223492/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/students-performance-in-exams/StudentsPerformance.csv')
df.info()
code
105194699/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.head()
code
105194699/cell_20
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
null_columns = pd.DataFrame(df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0], columns=['Percentage of NaN values'])
null_columns['Total NaN Values'] = df[df.columns[df.isnull().any()]].isnull().sum()
null_columns
null_cols = null_columns.index.tolist()
null_cols
for i in null_cols:
    sns.distplot(df[i])
code
105194699/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.info()
code
105194699/cell_19
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import missingno as mno
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
null_columns = pd.DataFrame(df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0], columns=['Percentage of NaN values'])
null_columns['Total NaN Values'] = df[df.columns[df.isnull().any()]].isnull().sum()
null_columns
null_cols = null_columns.index.tolist()
null_cols
import missingno as mno
mno.matrix(df[null_cols], figsize=(20, 6))
plt.show()
code
105194699/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
null_columns = pd.DataFrame(df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0], columns=['Percentage of NaN values'])
null_columns['Total NaN Values'] = df[df.columns[df.isnull().any()]].isnull().sum()
null_columns
null_cols = null_columns.index.tolist()
print(type(null_cols))
null_cols
code
105194699/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
sns.countplot(data=df, x=df.Potability)
df.Potability.value_counts()
code
105194699/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
code
105194699/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
null_columns = pd.DataFrame(df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0], columns=['Percentage of NaN values'])
null_columns['Total NaN Values'] = df[df.columns[df.isnull().any()]].isnull().sum()
null_columns
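# Equivalent shortcut for the NaN percentages above (illustrative alternative):
# df.isnull().mean() * 100  -- same values per column, with zeros for complete columns.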
code
105194699/cell_24
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
null_columns = pd.DataFrame(df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0], columns=['Percentage of NaN values'])
null_columns['Total NaN Values'] = df[df.columns[df.isnull().any()]].isnull().sum()
null_columns
null_cols = null_columns.index.tolist()
null_cols
df['ph'] = df['ph'].replace(np.nan, df.ph.mean())
sns.distplot(df.Sulfate)
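# After the mean imputation above, ph should contain no missing values, while Sulfate and
# Trihalomethanes are still untouched at this point; a quick check (optional):
# print(df.ph.isnull().sum())  # expected 0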
code
105194699/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
code