Dataset schema (each record below lists: path, screenshot_names, code, cell_type):
path — string (13 to 17 characters)
screenshot_names — sequence (1 to 873 items)
code — string (0 to 40.4k characters)
cell_type — string (1 distinct value)
16146132/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features.remove('Survived')
num_features.append('Survived')
num_features
code
16146132/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
print('{:.2f}% survival rate, {} out of {} survived'.format(
    df_train.Survived.sum() / len(df_train) * 100,
    df_train.Survived.sum(),
    len(df_train)))
code
88087082/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

n_features = 300
features = [f'f_{i}' for i in range(n_features)]
train = pd.read_pickle('../input/ubiquant-market-prediction-half-precision-pickle/train.pkl')
plt.figure(figsize=(25, 30))
plt.title('Pearson Correlation', y=1.05, size=15)
sns.heatmap(train[features].loc[:1000].corr(), annot=True)
code
88087082/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

n_features = 300
features = [f'f_{i}' for i in range(n_features)]
train = pd.read_pickle('../input/ubiquant-market-prediction-half-precision-pickle/train.pkl')
print(train.shape)
train.head()
code
88087082/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34127846/cell_21
[ "text_plain_output_1.png" ]
from tensorflow import keras as k
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json
import numpy as np

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

data_len = len(headline)
train_size = round(data_len * 80 / 100)
train_headline = headline[0:train_size]
test_headline = headline[train_size:]
train_result = is_sarcastic[0:train_size]
test_result = is_sarcastic[train_size:]

token2 = Tokenizer(oov_token='<OOV>')
token2.fit_on_texts(train_headline)
word_index_2 = token2.word_index
train_seq = token2.texts_to_sequences(train_headline)
train_pad = pad_sequences(train_seq)
test_seq = token2.texts_to_sequences(test_headline)
test_pad = pad_sequences(test_seq)

vocab_size = len(word_index_2) + 1
model = k.Sequential([
    k.layers.Embedding(vocab_size, 50),
    k.layers.GlobalAveragePooling1D(),
    k.layers.Dense(24, activation='relu'),
    k.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

train_pad = np.array(train_pad)
train_result = np.array(train_result)
test_pad = np.array(test_pad)
test_result = np.array(test_result)
training = model.fit(train_pad, train_result, epochs=30,
                     validation_data=(test_pad, test_result), verbose=2)
code
34127846/cell_9
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

token = Tokenizer(oov_token='<oov>')
token.fit_on_texts(headline)
word_index = token.word_index
len(word_index)
code
34127846/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json
import numpy as np

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

data_len = len(headline)
train_size = round(data_len * 80 / 100)
train_headline = headline[0:train_size]
test_headline = headline[train_size:]
train_result = is_sarcastic[0:train_size]
test_result = is_sarcastic[train_size:]

token2 = Tokenizer(oov_token='<OOV>')
token2.fit_on_texts(train_headline)
word_index_2 = token2.word_index
train_seq = token2.texts_to_sequences(train_headline)
train_pad = pad_sequences(train_seq)
test_seq = token2.texts_to_sequences(test_headline)
test_pad = pad_sequences(test_seq)

train_pad = np.array(train_pad)
train_result = np.array(train_result)
test_pad = np.array(test_pad)
test_result = np.array(test_result)
type(train_pad)
code
34127846/cell_6
[ "text_plain_output_1.png" ]
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

print(article_link[3])
print(headline[3])
print(is_sarcastic[3])
code
34127846/cell_11
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

token = Tokenizer(oov_token='<oov>')
token.fit_on_texts(headline)
word_index = token.word_index
len(word_index)

seq = token.texts_to_sequences(headline)
padded = pad_sequences(seq, padding='post')
padded[0]
print(padded.shape)
code
34127846/cell_18
[ "text_plain_output_1.png" ]
from tensorflow import keras as k
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

data_len = len(headline)
train_size = round(data_len * 80 / 100)
train_headline = headline[0:train_size]
test_headline = headline[train_size:]
train_result = is_sarcastic[0:train_size]
test_result = is_sarcastic[train_size:]

token2 = Tokenizer(oov_token='<OOV>')
token2.fit_on_texts(train_headline)
word_index_2 = token2.word_index
train_seq = token2.texts_to_sequences(train_headline)
train_pad = pad_sequences(train_seq)
test_seq = token2.texts_to_sequences(test_headline)
test_pad = pad_sequences(test_seq)

vocab_size = len(word_index_2) + 1
model = k.Sequential([
    k.layers.Embedding(vocab_size, 50),
    k.layers.GlobalAveragePooling1D(),
    k.layers.Dense(24, activation='relu'),
    k.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
code
34127846/cell_16
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

data_len = len(headline)
train_size = round(data_len * 80 / 100)
train_headline = headline[0:train_size]
test_headline = headline[train_size:]
train_result = is_sarcastic[0:train_size]
test_result = is_sarcastic[train_size:]

token2 = Tokenizer(oov_token='<OOV>')
token2.fit_on_texts(train_headline)
word_index_2 = token2.word_index
train_seq = token2.texts_to_sequences(train_headline)
train_pad = pad_sequences(train_seq)
test_seq = token2.texts_to_sequences(test_headline)
test_pad = pad_sequences(test_seq)

vocab_size = len(word_index_2) + 1
vocab_size
code
34127846/cell_3
[ "text_plain_output_1.png" ]
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

dataset[0]
code
34127846/cell_22
[ "text_plain_output_1.png" ]
from tensorflow import keras as k
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json
import numpy as np

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

data_len = len(headline)
train_size = round(data_len * 80 / 100)
train_headline = headline[0:train_size]
test_headline = headline[train_size:]
train_result = is_sarcastic[0:train_size]
test_result = is_sarcastic[train_size:]

token2 = Tokenizer(oov_token='<OOV>')
token2.fit_on_texts(train_headline)
word_index_2 = token2.word_index
train_seq = token2.texts_to_sequences(train_headline)
train_pad = pad_sequences(train_seq)
test_seq = token2.texts_to_sequences(test_headline)
test_pad = pad_sequences(test_seq)

vocab_size = len(word_index_2) + 1
model = k.Sequential([
    k.layers.Embedding(vocab_size, 50),
    k.layers.GlobalAveragePooling1D(),
    k.layers.Dense(24, activation='relu'),
    k.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

train_pad = np.array(train_pad)
train_result = np.array(train_result)
test_pad = np.array(test_pad)
test_result = np.array(test_result)
training = model.fit(train_pad, train_result, epochs=30,
                     validation_data=(test_pad, test_result), verbose=2)

sentences = ['Meh, Kind of good', 'Climate is perfect']
sequences = token2.texts_to_sequences(sentences)
latest_padded = pad_sequences(sequences)
model.predict(latest_padded)
code
34127846/cell_10
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

token = Tokenizer(oov_token='<oov>')
token.fit_on_texts(headline)
word_index = token.word_index
len(word_index)

seq = token.texts_to_sequences(headline)
padded = pad_sequences(seq, padding='post')
padded[0]
code
34127846/cell_12
[ "text_plain_output_1.png" ]
import json

dataset = []
for line in open('/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset.json', 'r'):
    dataset.append(json.loads(line))

article_link = []
headline = []
is_sarcastic = []
for item in dataset:
    article_link.append(item['article_link'])
    headline.append(item['headline'])
    is_sarcastic.append(item['is_sarcastic'])

data_len = len(headline)
train_size = round(data_len * 80 / 100)
print(train_size)
code
122258057/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
train_df['Survived'].value_counts(normalize=True).plot(kind='bar', label='Survived')
code
122258057/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.info()
code
122258057/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.histplot(train_df['Survived'])
code
122258057/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
pd.plotting.scatter_matrix(train_df[['Age', 'SibSp']], alpha=0.2)
code
122258057/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
train_df.info()
code
122258057/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
train_df['Survived'].hist(bins=2)
code
122258057/cell_48
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.barplot(x='Sex', y='Survived', hue='Embarked', data=train_df)
plt.legend()
plt.xlabel('Sex')
plt.ylabel('Share of survivors')
plt.title('Share of survivors for men and women by port of embarkation')
code
122258057/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.head(5)
code
122258057/cell_50
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
f, ax = plt.subplots(figsize=(25, 10))
sns.countplot(x='Age', hue='Survived', data=train_df[(train_df['Age'] > 5) & (train_df['Age'] < 30)])
code
122258057/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.pairplot(train_df, vars=['Age', 'SibSp'])
code
122258057/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df.describe(include='int64')
code
122258057/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.jointplot(x='Age', y='SibSp', data=train_df)
code
122258057/cell_47
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.barplot(x='Sex', y='Survived', data=train_df)
plt.legend()
plt.xlabel('Sex')
plt.ylabel('Share of survivors')
plt.title('Survival rate for men and women')
code
122258057/cell_43
[ "text_plain_output_1.png" ]
""" sns.boxplot(y="Fare", x="Pclass", data=train_df, orient="h"); Такой boxplot получается не очень красивым из-за выбросов.** Опционально: создайте признак `Fare_no_out` (стоимости без выбросов), в котором исключаются стоимости, отличающиеся от средней по классу более чем на 2 стандартных отклонения. Важно: надо исключать выбросы именно в зависимости от класса каюты. Иначе исключаться будут только самые большие (1 класс) и малые (3 класс) стоимости. train_df['Fare_no_out'] = train_df['Fare'] fare_pclass1 = train_df[train_df['Pclass'] == 1]['Fare'] fare_pclass2 = train_df[train_df['Pclass'] == 2]['Fare'] fare_pclass3 = train_df[train_df['Pclass'] == 3]['Fare'] fare_pclass1_no_out = # Ваш код здесь fare_pclass2_no_out = # Ваш код здесь fare_pclass3_no_out = # Ваш код здесь train_df['Fare_no_out'] = fare_pclass1_no_out.append(fare_pclass2_no_out) .append(fare_pclass3_no_out) #новый box plot """
code
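The commented template above leaves the per-class outlier filtering blank. A minimal sketch of one way to fill it in is shown below, assuming the same `train_df` with `Fare` and `Pclass` columns; the `drop_fare_outliers` helper and the `groupby`-based realignment are the editor's illustration, not code from the original notebook.

import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df = train_df.drop('Cabin', axis=1).dropna()

# Illustrative sketch (not from the original notebook): keep only fares within
# 2 standard deviations of the mean fare of their own passenger class.
def drop_fare_outliers(fares):
    return fares[(fares - fares.mean()).abs() <= 2 * fares.std()]

train_df['Fare_no_out'] = (
    train_df.groupby('Pclass')['Fare']
    .apply(drop_fare_outliers)
    .reset_index(level=0, drop=True)  # drop the Pclass level so values realign on PassengerId
)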
122258057/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
train_df.info()
code
122258057/cell_46
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.countplot(x='Sex', hue='Survived', data=train_df)
plt.legend()
plt.xlabel('Sex')
plt.ylabel('Number of survivors')
plt.title('Survival counts for men and women')
plt.savefig('qwe.png', dpi=300)
code
122258057/cell_24
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
sns.displot(train_df['Survived'])
code
122258057/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
code
122258057/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
plt.hist(x=train_df['Survived'])
code
122258057/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
train_df.plot.scatter(x='Age', y='SibSp')
code
122258057/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_df = pd.read_csv('../input/titanic/titanic_train.csv', index_col='PassengerId')
train_df.isnull().sum()
train_df = train_df.drop('Cabin', axis=1).dropna()
plt.scatter(train_df['Age'], train_df['SibSp'])
code
1009955/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

price = pd.read_csv('../input/price.csv')
price_per_sqft = pd.read_csv('../input/pricepersqft.csv')
flat_price = pd.melt(price, id_vars=['City Code', 'City', 'Metro', 'County', 'State', 'Population Rank'])
flat_price.dropna(inplace=True)
top10 = flat_price[flat_price['variable'] == 'January 2017'].sort_values(by=['value'], ascending=False).head(10)
top10['City_State'] = top10['City'] + ' ' + top10['State']
ax = top10[['City_State', 'value']].plot(kind='bar', use_index=False)
ax.set_xticklabels(top10['City_State'])
code
1009955/cell_20
[ "text_html_output_1.png" ]
flat_grouped = flat_price_sorted.groupby(['City_State'])
value_diff = flat_grouped['value'].agg({'value': ['first', 'last']})
value_diff['value']['last'] - value_diff['value']['first']
code
1009955/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
flat_grouped = flat_price_sorted.groupby(['City_State'])
value_diff = flat_grouped['value'].agg({'value': ['first', 'last']})
code
1009955/cell_18
[ "text_html_output_1.png" ]
flat_grouped = flat_price_sorted.groupby(['City_State'])
code
1009955/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

price = pd.read_csv('../input/price.csv')
price_per_sqft = pd.read_csv('../input/pricepersqft.csv')
flat_price = pd.melt(price, id_vars=['City Code', 'City', 'Metro', 'County', 'State', 'Population Rank'])
flat_price.head()
code
1009955/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1009955/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

price = pd.read_csv('../input/price.csv')
price_per_sqft = pd.read_csv('../input/pricepersqft.csv')
flat_price = pd.melt(price, id_vars=['City Code', 'City', 'Metro', 'County', 'State', 'Population Rank'])
flat_price.dropna(inplace=True)
flat_price.sort_values(by=['City Code', 'date']).head(10)
code
1009955/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

price = pd.read_csv('../input/price.csv')
price_per_sqft = pd.read_csv('../input/pricepersqft.csv')
price.head()
code
73073936/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')

fig, ax = plt.subplots()
ax.scatter(x=training_dataframe['GrLivArea'], y=training_dataframe['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

# Deleting outliers
training_dataframe = training_dataframe.drop(
    training_dataframe[(training_dataframe['GrLivArea'] > 4000) & (training_dataframe['SalePrice'] < 300000)].index)

# Check the scatterplot again
fig, ax = plt.subplots()
ax.scatter(training_dataframe['GrLivArea'], training_dataframe['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

training_dataframe['SalePrice'].describe()
code
73073936/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')

fig, ax = plt.subplots()
ax.scatter(x=training_dataframe['GrLivArea'], y=training_dataframe['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
code
73073936/cell_2
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
training_dataframe.head()
code
73073936/cell_11
[ "text_html_output_1.png" ]
from scipy import stats from scipy.stats import norm, skew import matplotlib.pyplot as plt import os import pandas as pd import seaborn as sns import warnings import os import pandas as pd import numpy as np import seaborn as sns color = sns.color_palette() sns.set_style('darkgrid') import math import matplotlib.pyplot as plt from scipy.stats import skew import warnings def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn from scipy import stats from scipy.stats import norm, skew training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') fig, ax = plt.subplots() ax.scatter(x = training_dataframe['GrLivArea'], y = training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Deleting outliers training_dataframe= training_dataframe.drop(training_dataframe[(training_dataframe['GrLivArea']>4000) & (training_dataframe['SalePrice']<300000)].index) #Check the scatterplot again fig, ax = plt.subplots() ax.scatter(training_dataframe['GrLivArea'], training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() sns.distplot(training_dataframe['SalePrice'], fit=norm) mu, sigma = norm.fit(training_dataframe['SalePrice']) print('\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show()
code
73073936/cell_19
[ "text_plain_output_1.png" ]
from scipy import stats from scipy.stats import norm, skew import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import seaborn as sns import warnings import os import pandas as pd import numpy as np import seaborn as sns color = sns.color_palette() sns.set_style('darkgrid') import math import matplotlib.pyplot as plt from scipy.stats import skew import warnings def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn from scipy import stats from scipy.stats import norm, skew training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') fig, ax = plt.subplots() ax.scatter(x = training_dataframe['GrLivArea'], y = training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Deleting outliers training_dataframe= training_dataframe.drop(training_dataframe[(training_dataframe['GrLivArea']>4000) & (training_dataframe['SalePrice']<300000)].index) #Check the scatterplot again fig, ax = plt.subplots() ax.scatter(training_dataframe['GrLivArea'], training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Plotting histogram sns.distplot(training_dataframe['SalePrice'] , fit=norm); # Fitted Parameters used by function (mu, sigma) = norm.fit(training_dataframe['SalePrice']) print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) #Plotting the distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') #Plotting QQ-plot fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show() #using log1p which applies log(1+x) to all elements of the column training_dataframe["SalePrice"] = np.log1p(training_dataframe["SalePrice"]) #Checking the new distribution sns.distplot(training_dataframe['SalePrice'] , fit=norm); # Geting fitted parameters used by the function (mu, sigma) = norm.fit(training_dataframe['SalePrice']) print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) #Plotting Distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') #Get also the QQ-plot fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show() corealtion_matrix = training_dataframe.corr() best_cor_feature = corealtion_matrix.index[abs(corealtion_matrix['SalePrice']) > 0.5] best_cor_feature def plotColor(*args): a = 3 b = int(len(args) / a) + 1 c = 1 for i in args: plt.axis('off') plt.text(0, 0.04, i, color='k', fontsize=11) plt.hlines(0, 0, 10, color=i, linestyles='solid', linewidth=25) c = c + 1 plt.tight_layout() return ntrain = training_dataframe.shape[0] ntest = testing_dataframe.shape[0] y_train = training_dataframe.SalePrice.values print('y_train shape is : {}'.format(y_train.shape)) all_data = pd.concat((training_dataframe, testing_dataframe)).reset_index(drop=True) all_data.drop(['SalePrice'], axis=1, inplace=True) print('all_data size is : {}'.format(all_data.shape)) total = all_data.isnull().sum().sort_values(ascending=False) percent = (all_data.isnull().sum() / all_data.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(30)
code
73073936/cell_1
[ "text_plain_output_1.png" ]
import os
import warnings
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import norm, skew

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

color = sns.color_palette()
sns.set_style('darkgrid')

def ignore_warn(*args, **kwargs):
    pass
warnings.warn = ignore_warn
code
73073936/cell_7
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')

fig, ax = plt.subplots()
ax.scatter(x=training_dataframe['GrLivArea'], y=training_dataframe['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()

training_dataframe = training_dataframe.drop(
    training_dataframe[(training_dataframe['GrLivArea'] > 4000) & (training_dataframe['SalePrice'] < 300000)].index)

fig, ax = plt.subplots()
ax.scatter(training_dataframe['GrLivArea'], training_dataframe['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
code
73073936/cell_16
[ "image_output_1.png" ]
from scipy import stats from scipy.stats import norm, skew import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import seaborn as sns import warnings import os import pandas as pd import numpy as np import seaborn as sns color = sns.color_palette() sns.set_style('darkgrid') import math import matplotlib.pyplot as plt from scipy.stats import skew import warnings def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn from scipy import stats from scipy.stats import norm, skew training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') fig, ax = plt.subplots() ax.scatter(x = training_dataframe['GrLivArea'], y = training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Deleting outliers training_dataframe= training_dataframe.drop(training_dataframe[(training_dataframe['GrLivArea']>4000) & (training_dataframe['SalePrice']<300000)].index) #Check the scatterplot again fig, ax = plt.subplots() ax.scatter(training_dataframe['GrLivArea'], training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Plotting histogram sns.distplot(training_dataframe['SalePrice'] , fit=norm); # Fitted Parameters used by function (mu, sigma) = norm.fit(training_dataframe['SalePrice']) print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) #Plotting the distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') #Plotting QQ-plot fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show() #using log1p which applies log(1+x) to all elements of the column training_dataframe["SalePrice"] = np.log1p(training_dataframe["SalePrice"]) #Checking the new distribution sns.distplot(training_dataframe['SalePrice'] , fit=norm); # Geting fitted parameters used by the function (mu, sigma) = norm.fit(training_dataframe['SalePrice']) print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) #Plotting Distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') #Get also the QQ-plot fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show() corealtion_matrix = training_dataframe.corr() best_cor_feature = corealtion_matrix.index[abs(corealtion_matrix['SalePrice']) > 0.5] plt.figure(figsize=(12, 12)) sns.heatmap(training_dataframe[best_cor_feature].corr(), annot=True) best_cor_feature def plotColor(*args): a = 3 b = int(len(args) / a) + 1 c = 1 plt.figure(figsize=(a * 3, b)) for i in args: plt.subplot(b, a, c) plt.axis('off') plt.text(0, 0.04, i, color='k', fontsize=11) plt.hlines(0, 0, 10, color=i, linestyles='solid', linewidth=25) c = c + 1 plt.tight_layout() plt.show() return print('\tfuntion plotColor created ...')
code
73073936/cell_14
[ "image_output_1.png" ]
from scipy import stats from scipy.stats import norm, skew import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import seaborn as sns import warnings import os import pandas as pd import numpy as np import seaborn as sns color = sns.color_palette() sns.set_style('darkgrid') import math import matplotlib.pyplot as plt from scipy.stats import skew import warnings def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn from scipy import stats from scipy.stats import norm, skew training_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') testing_dataframe = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') fig, ax = plt.subplots() ax.scatter(x = training_dataframe['GrLivArea'], y = training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Deleting outliers training_dataframe= training_dataframe.drop(training_dataframe[(training_dataframe['GrLivArea']>4000) & (training_dataframe['SalePrice']<300000)].index) #Check the scatterplot again fig, ax = plt.subplots() ax.scatter(training_dataframe['GrLivArea'], training_dataframe['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() #Plotting histogram sns.distplot(training_dataframe['SalePrice'] , fit=norm); # Fitted Parameters used by function (mu, sigma) = norm.fit(training_dataframe['SalePrice']) print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) #Plotting the distribution plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') #Plotting QQ-plot fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show() training_dataframe['SalePrice'] = np.log1p(training_dataframe['SalePrice']) sns.distplot(training_dataframe['SalePrice'], fit=norm) mu, sigma = norm.fit(training_dataframe['SalePrice']) print('\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma)) plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') fig = plt.figure() res = stats.probplot(training_dataframe['SalePrice'], plot=plt) plt.show()
code
90146658/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import datasets

iris = datasets.load_iris()
list(iris.keys())
print(iris.DESCR)
code
90146658/cell_8
[ "text_plain_output_1.png" ]
from sklearn import datasets

iris = datasets.load_iris()
list(iris.keys())
code
90146658/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets

iris = datasets.load_iris()
list(iris.keys())
X = iris.data[:, 2:]
y = iris.target

iris = sns.load_dataset('iris')
sns.set_style('whitegrid')
sns.FacetGrid(iris, hue='species', height=6).map(plt.scatter, 'petal_width', 'petal_length').add_legend()
iris.head()
code
90146658/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier

iris = datasets.load_iris()
list(iris.keys())
X = iris.data[:, 2:]
y = iris.target

tree_clf = DecisionTreeClassifier(max_depth=2)
tree_clf.fit(X, y)
code
90146658/cell_17
[ "text_html_output_1.png" ]
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import seaborn as sns

iris = datasets.load_iris()
list(iris.keys())
X = iris.data[:, 2:]
y = iris.target

iris = sns.load_dataset('iris')
sns.set_style('whitegrid')
sns.FacetGrid(iris, hue='species', height=6).map(plt.scatter, 'petal_width', 'petal_length').add_legend()

tree_clf = DecisionTreeClassifier(max_depth=2)
tree_clf.fit(X, y)

export_graphviz(tree_clf, out_file=image_path('iris_tree.dot'),
                feature_names=iris.feature_names[2:], class_names=iris.target_names,
                rounded=True, filled=True)
code
90146658/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets

iris = datasets.load_iris()
list(iris.keys())
X = iris.data[:, 2:]
y = iris.target

iris = sns.load_dataset('iris')
sns.set_style('whitegrid')
sns.FacetGrid(iris, hue='species', height=6).map(plt.scatter, 'petal_width', 'petal_length').add_legend()
code
90146658/cell_12
[ "text_plain_output_1.png" ]
from sklearn import datasets

iris = datasets.load_iris()
list(iris.keys())
X = iris.data[:, 2:]
y = iris.target
X[:5, :]
code
32073950/cell_11
[ "text_html_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt pd.options.mode.chained_assignment = None import os crime = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/crime-data-from-2010-to-present.csv') arrest = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/arrest-data-from-2010-to-present.csv') ocrd = [list(), list(), list()] rprt = [list(), list(), list()] datas = [list(crime['Date Occurred']), list(crime['Date Reported'])] lists = [ocrd, rprt] x = 0 while x < 2: for i in datas[x]: temp = i.split('-') if len(str(temp[0])) == 7: lists[x][0].append(str(temp[0])[3:8]) else: lists[x][0].append(temp[0]) lists[x][1].append(str(temp[1])) lists[x][2].append(str(temp[2])[0:2]) x += 1 dist = crime['Area Name'].unique() rate = [] for i in dist: x = len(crime.loc[crime['Area Name'] == i]) rate.append(x) rate.sort(reverse=True) crime19 = crime.loc[crime.RepY == '2019'] xxx = crime19.loc[(crime19.RepD == '01') & (crime19.RepM == '01')] len(xxx.RepY)
code
32073950/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

pd.options.mode.chained_assignment = None

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32073950/cell_8
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

pd.options.mode.chained_assignment = None

crime = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/crime-data-from-2010-to-present.csv')
arrest = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/arrest-data-from-2010-to-present.csv')

ocrd = [list(), list(), list()]
rprt = [list(), list(), list()]
datas = [list(crime['Date Occurred']), list(crime['Date Reported'])]
lists = [ocrd, rprt]
x = 0
while x < 2:
    for i in datas[x]:
        temp = i.split('-')
        if len(str(temp[0])) == 7:
            lists[x][0].append(str(temp[0])[3:8])
        else:
            lists[x][0].append(temp[0])
        lists[x][1].append(str(temp[1]))
        lists[x][2].append(str(temp[2])[0:2])
    x += 1

dist = crime['Area Name'].unique()
rate = []
for i in dist:
    x = len(crime.loc[crime['Area Name'] == i])
    rate.append(x)
rate.sort(reverse=True)

f, ax = plt.subplots(figsize=(15, 8))
sns.barplot(x=dist, y=rate)
plt.xticks(rotation=45)
plt.xlabel('Area')   # call xlabel() instead of overwriting it with a string
plt.ylabel('Count')  # call ylabel() instead of overwriting it with a string
plt.show()
code
32073950/cell_15
[ "text_plain_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt pd.options.mode.chained_assignment = None import os crime = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/crime-data-from-2010-to-present.csv') arrest = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/arrest-data-from-2010-to-present.csv') ocrd = [list(), list(), list()] rprt = [list(), list(), list()] datas = [list(crime['Date Occurred']), list(crime['Date Reported'])] lists = [ocrd, rprt] x = 0 while x < 2: for i in datas[x]: temp = i.split('-') if len(str(temp[0])) == 7: lists[x][0].append(str(temp[0])[3:8]) else: lists[x][0].append(temp[0]) lists[x][1].append(str(temp[1])) lists[x][2].append(str(temp[2])[0:2]) x += 1 dist = crime['Area Name'].unique() rate = [] for i in dist: x = len(crime.loc[crime['Area Name'] == i]) rate.append(x) rate.sort(reverse=True) crime19 = crime.loc[crime.RepY == '2019'] xxx = crime19.loc[(crime19.RepD == '01') & (crime19.RepM == '01')] len(xxx.RepY) crimex = crime crimesWo19 = crimex.loc[crimex.RepY != '2019'] Delay = [] Mouth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] OD = list(crimesWo19.OccD) RD = list(crimesWo19.RepD) OM = list(crimesWo19.OccM) for i in range(1888002): od = int(OD[i]) rd = int(RD[i]) Day = rd - od if Day < 0: M = int(OM[i]) - 1 Ekstra = Mouth[M] - od day = Ekstra + rd Delay.append(day) else: Delay.append(Day) crimesWo19['delay'] = Delay crimesWo19.tail()
code
32073950/cell_12
[ "image_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt pd.options.mode.chained_assignment = None import os crime = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/crime-data-from-2010-to-present.csv') arrest = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/arrest-data-from-2010-to-present.csv') ocrd = [list(), list(), list()] rprt = [list(), list(), list()] datas = [list(crime['Date Occurred']), list(crime['Date Reported'])] lists = [ocrd, rprt] x = 0 while x < 2: for i in datas[x]: temp = i.split('-') if len(str(temp[0])) == 7: lists[x][0].append(str(temp[0])[3:8]) else: lists[x][0].append(temp[0]) lists[x][1].append(str(temp[1])) lists[x][2].append(str(temp[2])[0:2]) x += 1 dist = crime['Area Name'].unique() rate = [] for i in dist: x = len(crime.loc[crime['Area Name'] == i]) rate.append(x) rate.sort(reverse=True) crime19 = crime.loc[crime.RepY == '2019'] xxx = crime19.loc[(crime19.RepD == '01') & (crime19.RepM == '01')] len(xxx.RepY) len(crime19.RepY) - len(xxx.RepY)
code
32073950/cell_5
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

pd.options.mode.chained_assignment = None

crime = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/crime-data-from-2010-to-present.csv')
arrest = pd.read_csv('/kaggle/input/los-angeles-crime-arrest-data/arrest-data-from-2010-to-present.csv')
crime.head()
code
73079159/cell_13
[ "text_html_output_1.png" ]
from datetime import datetime from gluonts.dataset.common import ListDataset from gluonts.dataset.field_names import FieldName from gluonts.model.deepar import DeepAREstimator from gluonts.mx.trainer import Trainer import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape df_train.shape freq = 'M' start_train = pd.Timestamp('2007-01-01', freq=freq) start_test = pd.Timestamp('2016-07-01', freq=freq) prediction_length = 2 estimator = DeepAREstimator(freq=freq, context_length=12, prediction_length=prediction_length, use_feat_static_cat=True, cardinality=[1], num_layers=2, num_cells=8, cell_type='lstm', trainer=Trainer(epochs=300, learning_rate=0.01, learning_rate_decay_factor=0.1)) from gluonts.dataset.common import ListDataset from gluonts.dataset.field_names import FieldName train_ds = ListDataset([{FieldName.TARGET: target, FieldName.START: start_train, FieldName.FEAT_STATIC_CAT: fsc} for target, fsc in zip(df_train, ts_code.reshape(-1, 1))], freq=freq) test_ds = ListDataset([{FieldName.TARGET: target, FieldName.START: start_test, FieldName.FEAT_STATIC_CAT: fsc} for target, fsc in zip(df_test, ts_code.reshape(-1, 1))], freq=freq) predictor = estimator.train(training_data=train_ds)
code
73079159/cell_9
[ "text_html_output_1.png" ]
from datetime import datetime import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_train.shape
code
73079159/cell_6
[ "image_output_1.png" ]
from datetime import datetime import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel('date') axx[i].set_ylabel(f'{df.columns[i]} Car Sales') axx[i].grid(which='minor', axis='x')
code
73079159/cell_29
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from datetime import datetime
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
from gluonts.model.deepar import DeepAREstimator
from gluonts.mx.trainer import Trainer
import matplotlib.pyplot as plt
import pandas as pd

def convert_to_date(x):
    return datetime.strptime(x, '%Y %m')

make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv',
                   parse_dates=[['Year', 'Month']], date_parser=convert_to_date)
make = make[['Year_Month', 'Quantity', 'Make']]

def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']):
    """
    Function to subset the data
    INPUT : dataframe to subset and car type list
    OUTPUT : 5 different dataframes with the subset data
    """
    dataframe.set_index('Year_Month', inplace=True)
    toyota = dataframe[dataframe['Make'] == subset[0]]
    ford = dataframe[dataframe['Make'] == subset[1]]
    honda = dataframe[dataframe['Make'] == subset[3]]
    BMW = dataframe[dataframe['Make'] == subset[2]]
    peugeot = dataframe[dataframe['Make'] == subset[4]]
    df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'],
                    honda['Quantity'], peugeot['Quantity']], axis=1)
    df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]]
    return df

df = subset_data()

### How the time series is for each Car model
### Some show upward trend some show downward trend
fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True)
axx = axs.ravel()
for i in range(0, 4):
    df[df.columns[i]].plot(ax=axx[i])
    axx[i].set_xlabel("date")
    axx[i].set_ylabel(f"{df.columns[i]} Car Sales")
    axx[i].grid(which='minor', axis='x')

df_input = df.reset_index(drop=True).T.reset_index()
ts_code = df_input['index'].astype('category').cat.codes.values
df_train = df_input.iloc[:, 1:116].values
df_test = df_input.iloc[:, 116:].values
df_test.shape
df_train.shape

freq = 'M'
start_train = pd.Timestamp('2007-01-01', freq=freq)
start_test = pd.Timestamp('2016-07-01', freq=freq)
prediction_length = 2
estimator = DeepAREstimator(freq=freq, context_length=12, prediction_length=prediction_length,
                            use_feat_static_cat=True, cardinality=[1], num_layers=2, num_cells=8,
                            cell_type='lstm',
                            trainer=Trainer(epochs=300, learning_rate=0.01, learning_rate_decay_factor=0.1))

train_ds = ListDataset([{FieldName.TARGET: target, FieldName.START: start_train, FieldName.FEAT_STATIC_CAT: fsc}
                        for target, fsc in zip(df_train, ts_code.reshape(-1, 1))], freq=freq)
test_ds = ListDataset([{FieldName.TARGET: target, FieldName.START: start_test, FieldName.FEAT_STATIC_CAT: fsc}
                       for target, fsc in zip(df_test, ts_code.reshape(-1, 1))], freq=freq)
predictor = estimator.train(training_data=train_ds)

def convert_to_date(x):
    return datetime.strptime(x, '%Y %m')

make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv',
                   parse_dates=[['Year', 'Month']], date_parser=convert_to_date)
make = make[['Year_Month', 'Quantity', 'Make']]
make = make[make['Make'] == 'Ford']
df_input = make[['Year_Month', 'Quantity']]
df_input = df_input.set_index('Year_Month')

train_time = '2016-08-01'
prediction_length = 6
estimator = DeepAREstimator(freq='1M', context_length=12, prediction_length=prediction_length,
                            num_layers=2, num_cells=128, cell_type='lstm',
                            trainer=Trainer(epochs=20))

training_data = ListDataset([{'start': df_input.index[0], 'target': df_input.Quantity[:train_time]}], freq='1M')
predictor = estimator.train(training_data=training_data)
code
73079159/cell_19
[ "text_plain_output_1.png" ]
item_metrics
code
73079159/cell_1
[ "text_plain_output_1.png" ]
## Install the package
#!pip install --upgrade mxnet-cu101==1.6.0.post0
!pip install --upgrade mxnet==1.6.0
!pip install gluonts
code
73079159/cell_18
[ "text_plain_output_1.png" ]
from datetime import datetime from gluonts.evaluation import Evaluator from tqdm.autonotebook import tqdm import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape from tqdm.autonotebook import tqdm tss = list(tqdm(ts_it, total=len(df_test))) forecasts = list(tqdm(forecast_it, total=len(df_test))) from gluonts.evaluation import Evaluator evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9]) agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(df_test))
code
73079159/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape
code
73079159/cell_15
[ "text_html_output_1.png" ]
from datetime import datetime from tqdm.autonotebook import tqdm import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape from tqdm.autonotebook import tqdm print('Obtaining time series conditioning values ...') tss = list(tqdm(ts_it, total=len(df_test))) print('Obtaining time series predictions ...') forecasts = list(tqdm(forecast_it, total=len(df_test)))
code
73079159/cell_3
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from datetime import datetime import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] make.head(3)
code
73079159/cell_17
[ "image_output_1.png" ]
from datetime import datetime from tqdm.autonotebook import tqdm import matplotlib.pyplot as plt import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape freq = 'M' start_train = pd.Timestamp('2007-01-01', freq=freq) start_test = pd.Timestamp('2016-07-01', freq=freq) prediction_length = 2 from tqdm.autonotebook import tqdm tss = list(tqdm(ts_it, total=len(df_test))) forecasts = list(tqdm(forecast_it, total=len(df_test))) def plot_prob_forecasts(ts_entry, forecast_entry): plot_length = prediction_length prediction_intervals = (80.0, 95.0) legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1] fig, ax = plt.subplots(1, 1, figsize=(10, 7)) ts_entry[-plot_length:].plot(ax=ax) forecast_entry.plot(prediction_intervals=prediction_intervals, color='g') plt.grid(which="both") plt.legend(legend, loc="upper left") plt.show() for i in tqdm(range(5)): ts_entry = tss[i] forecast_entry = forecasts[i] plot_prob_forecasts(ts_entry, forecast_entry)
code
73079159/cell_35
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime from tqdm.autonotebook import tqdm import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape freq = 'M' start_train = pd.Timestamp('2007-01-01', freq=freq) start_test = pd.Timestamp('2016-07-01', freq=freq) prediction_length = 2 from tqdm.autonotebook import tqdm tss = list(tqdm(ts_it, total=len(df_test))) forecasts = list(tqdm(forecast_it, total=len(df_test))) def plot_prob_forecasts(ts_entry, forecast_entry): plot_length = prediction_length prediction_intervals = (80.0, 95.0) legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1] fig, ax = plt.subplots(1, 1, figsize=(10, 7)) ts_entry[-plot_length:].plot(ax=ax) forecast_entry.plot(prediction_intervals=prediction_intervals, color='g') plt.grid(which="both") plt.legend(legend, loc="upper left") plt.show() for i in tqdm(range(5)): ts_entry = tss[i] forecast_entry = forecasts[i] train_time = '2016-08-01' prediction_length = 6 forecasts = list(forecast_it) tss = list(ts_it) forecast_entry = forecasts[0] def plot_prob_forecasts(ts_entry, forecast_entry): plot_length = prediction_length prediction_intervals = (80.0, 95.0) legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1] fig, ax = plt.subplots(1, 1, figsize=(10, 7)) ts_entry[-plot_length:].plot(ax=ax) forecast_entry.plot(prediction_intervals=prediction_intervals, color='g') plt.grid(which="both") plt.legend(legend, loc="upper left") plt.show() plot_prob_forecasts(tss[0], forecasts[0])
code
73079159/cell_22
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime import pandas as pd import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() freq = 'M' start_train = pd.Timestamp('2007-01-01', freq=freq) start_test = pd.Timestamp('2016-07-01', freq=freq) prediction_length = 2 def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] make.head(3)
code
73079159/cell_37
[ "text_html_output_1.png" ]
item_metrics  # per-time-series metrics DataFrame produced by the GluonTS Evaluator
code
73079159/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime
import pandas as pd
def convert_to_date(x):
    return datetime.strptime(x, '%Y %m')
make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date)
make = make[['Year_Month', 'Quantity', 'Make']]

def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']):
    """
    Function to subset the data
    INPUT : dataframe to subset and a list of car makes
    OUTPUT : a single DataFrame with one monthly sales column per make
    """
    dataframe.set_index('Year_Month', inplace=True)
    toyota = dataframe[dataframe['Make'] == subset[0]]
    ford = dataframe[dataframe['Make'] == subset[1]]
    honda = dataframe[dataframe['Make'] == subset[3]]
    BMW = dataframe[dataframe['Make'] == subset[2]]
    peugeot = dataframe[dataframe['Make'] == subset[4]]
    df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1)
    df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]]
    return df

df = subset_data()
df.head()
code
73079159/cell_36
[ "text_html_output_1.png" ]
from datetime import datetime from gluonts.dataset.common import ListDataset from gluonts.dataset.common import ListDataset from gluonts.evaluation import Evaluator from gluonts.evaluation import Evaluator from tqdm.autonotebook import tqdm import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] def subset_data(dataframe=make, subset=['Toyota', 'Ford', 'BMW', 'Honda', 'Peugeot']): """ Function to subset the data INPUT : dataframe to subset and car type list OUTPUT : 5 different dataframes with the subset data """ dataframe.set_index('Year_Month', inplace=True) toyota = dataframe[dataframe['Make'] == subset[0]] ford = dataframe[dataframe['Make'] == subset[1]] honda = dataframe[dataframe['Make'] == subset[3]] BMW = dataframe[dataframe['Make'] == subset[2]] peugeot = dataframe[dataframe['Make'] == subset[4]] df = pd.concat([toyota['Quantity'], ford['Quantity'], BMW['Quantity'], honda['Quantity'], peugeot['Quantity']], axis=1) df.columns = [subset[0], subset[1], subset[2], subset[3], subset[4]] return df df = subset_data() ### How the time series is for each Car model ### Some show upward trend some show downward trend fig, axs = plt.subplots(2, 2, figsize=(20, 20), sharex=True) axx = axs.ravel() for i in range(0, 4): df[df.columns[i]].plot(ax=axx[i]) axx[i].set_xlabel("date") axx[i].set_ylabel(f"{df.columns[i]} Car Sales") axx[i].grid(which='minor', axis='x') df_input = df.reset_index(drop=True).T.reset_index() ts_code = df_input['index'].astype('category').cat.codes.values df_train = df_input.iloc[:, 1:116].values df_test = df_input.iloc[:, 116:].values df_test.shape freq = 'M' start_train = pd.Timestamp('2007-01-01', freq=freq) start_test = pd.Timestamp('2016-07-01', freq=freq) prediction_length = 2 from tqdm.autonotebook import tqdm tss = list(tqdm(ts_it, total=len(df_test))) forecasts = list(tqdm(forecast_it, total=len(df_test))) from gluonts.evaluation import Evaluator evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9]) agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(df_test)) def convert_to_date(x): return datetime.strptime(x, '%Y %m') make = pd.read_csv('../input/newcarsalesnorway/norway_new_car_sales_by_make.csv', parse_dates=[['Year', 'Month']], date_parser=convert_to_date) make = make[['Year_Month', 'Quantity', 'Make']] make = make[make['Make'] == 'Ford'] df_input = make[['Year_Month', 'Quantity']] df_input = df_input.set_index('Year_Month') train_time = '2016-08-01' prediction_length = 6 from gluonts.dataset.common import ListDataset training_data = ListDataset([{'start': df_input.index[0], 'target': df_input.Quantity[:train_time]}], freq='1M') test_data = ListDataset([{'start': df_input.index[0], 'target': df_input.Quantity[:'2017-01-01']}], freq='1M') forecasts = list(forecast_it) tss = list(ts_it) from gluonts.evaluation import Evaluator evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9]) agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_data))
code
2014978/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
# records per department
df.Department.groupby(df.Department).count().plot(kind='bar')
# top 20 areas of specialization / research interests
Specialization = 'Area of Specialization/Research Interests'
df[Specialization].value_counts().sort_values()[::-1][:20].plot(kind='bar')
code
2014978/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
df.Department.groupby(df.Department).count().plot(kind='bar')
code
2014978/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from subprocess import check_output
import matplotlib.pyplot as plt
import plotly.plotly as py
import numpy as np
import pandas as pd
print(check_output(['ls', '../input']).decode('utf8'))
plt.rcParams['figure.figsize'] = (12, 5)
df = pd.read_csv('../input/Pakistan Intellectual Capital - Computer Science - Ver 1.csv', encoding='ISO-8859-1')
df.head()
code
2014978/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
df.Department.groupby(df.Department).count().plot(kind='bar') province = 'Province University Located' df[province].value_counts().sort_values().plot(kind='bar')
code
2014978/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
df.Department.groupby(df.Department).count().plot(kind='bar') df['Country'].value_counts().sort_values()[::-1][1:].plot(kind='bar')
code
17132381/cell_42
[ "text_plain_output_1.png" ]
# activations captured by the forward hook `hook_a`, moved to the CPU
acts = hook_a.stored[0].cpu()
acts.shape  # inspect the activation tensor's shape
code
17132381/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import cohen_kappa_score import os import pandas as pd import os os.listdir('../input') def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 999 seed_everything(SEED) base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) bs = 64 sz = 224 tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5) src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis') data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats) from sklearn.metrics import cohen_kappa_score def quadratic_kappa(y_hat, y): return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0') learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa]) learn.fit_one_cycle(4, max_lr=0.01)
code
17132381/cell_13
[ "text_html_output_1.png" ]
import os import pandas as pd import os os.listdir('../input') def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 999 seed_everything(SEED) base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) df['diagnosis'].hist(figsize=(10, 5))
code
17132381/cell_23
[ "text_html_output_1.png" ]
from sklearn.metrics import cohen_kappa_score import os import pandas as pd import os os.listdir('../input') def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 999 seed_everything(SEED) base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) bs = 64 sz = 224 tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5) src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis') data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats) from sklearn.metrics import cohen_kappa_score def quadratic_kappa(y_hat, y): return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0') learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa]) learn.fit_one_cycle(4, max_lr=0.01) learn.unfreeze() learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001))
code
17132381/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
os.listdir('../input')
code
17132381/cell_26
[ "text_html_output_1.png" ]
from sklearn.metrics import cohen_kappa_score import os import pandas as pd import os os.listdir('../input') def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 999 seed_everything(SEED) base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) bs = 64 sz = 224 tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5) src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis') data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats) from sklearn.metrics import cohen_kappa_score def quadratic_kappa(y_hat, y): return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0') learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa]) learn.fit_one_cycle(4, max_lr=0.01) learn.unfreeze() learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001)) interp = ClassificationInterpretation.from_learner(learn) losses, idxs = interp.top_losses() len(data.valid_ds) == len(losses) == len(idxs)
code
17132381/cell_11
[ "text_plain_output_1.png" ]
import os
import random
import numpy as np
import pandas as pd
import torch
os.listdir('../input')

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
SEED = 999
seed_everything(SEED)
base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)
len_df = len(df)
print(f'There are {len_df} images')
code
17132381/cell_7
[ "image_output_1.png" ]
import torch
print('Make sure cudnn is enabled:', torch.backends.cudnn.enabled)
code
17132381/cell_49
[ "text_plain_output_1.png" ]
from sklearn.metrics import cohen_kappa_score import matplotlib.pyplot as plt import os import pandas as pd import os os.listdir('../input') def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 999 seed_everything(SEED) base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) bs = 64 sz = 224 tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5) src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis') data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats) from sklearn.metrics import cohen_kappa_score def quadratic_kappa(y_hat, y): return torch.tensor(cohen_kappa_score(y_hat.argmax(dim=-1), y, weights='quadratic'), device='cuda:0') learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa]) learn.fit_one_cycle(4, max_lr=0.01) learn.unfreeze() learn.fit_one_cycle(6, max_lr=slice(1e-06, 0.001)) interp = ClassificationInterpretation.from_learner(learn) losses, idxs = interp.top_losses() len(data.valid_ds) == len(losses) == len(idxs) idx = 1 im, cl = learn.data.dl(DatasetType.Valid).dataset[idx] cl = int(cl) xb, _ = data.one_item(im) xb_im = Image(data.denorm(xb)[0]) xb = xb.cuda() m = learn.model.eval() acts = hook_a.stored[0].cpu() acts.shape grad = hook_g.stored[0][0].cpu() grad.shape grad_chan = grad.mean(1).mean(1) grad_chan.shape mult = F.relu((acts * grad_chan[..., None, None]).sum(0)) mult.shape #Utility function to display heatmap: def show_heatmap(hm): _,ax = plt.subplots() sz = list(xb_im.shape[-2:]) xb_im.show(ax,title=f"pred. class: {interp.pred_class[idx]}, actual class: {learn.data.classes[cl]}") ax.imshow(hm, alpha=0.6, extent=(0,*sz[::-1],0), interpolation='bilinear', cmap='magma') return _,ax show_heatmap(mult)
code
17132381/cell_16
[ "text_plain_output_1.png" ]
import os import pandas as pd import os os.listdir('../input') def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 999 seed_everything(SEED) base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) bs = 64 sz = 224 tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=360, max_warp=0, max_zoom=1.1, max_lighting=0.1, p_lighting=0.5) src = ImageList.from_df(df=df, path='./', cols='path').split_by_rand_pct(0.2).label_from_df(cols='diagnosis') data = src.transform(tfms, size=sz, resize_method=ResizeMethod.SQUISH, padding_mode='zeros').databunch(bs=bs, num_workers=4).normalize(imagenet_stats) data.show_batch(rows=3, figsize=(7, 6))
code
17132381/cell_47
[ "text_plain_output_1.png" ]
import torch.nn.functional as F

# activations and gradients captured by the hooks registered earlier in the notebook
acts = hook_a.stored[0].cpu()
acts.shape
grad = hook_g.stored[0][0].cpu()
grad.shape
# average the gradients over the spatial dimensions: one importance weight per channel
grad_chan = grad.mean(1).mean(1)
grad_chan.shape
# Grad-CAM-style map: channel-weighted sum of activations, ReLU keeps positive evidence only
mult = F.relu((acts * grad_chan[..., None, None]).sum(0))
mult.shape
code
17132381/cell_43
[ "text_plain_output_1.png" ]
# gradients captured by the backward hook `hook_g`, moved to the CPU
grad = hook_g.stored[0][0].cpu()
grad.shape  # inspect the gradient tensor's shape
code