Columns:
  path: string (lengths 13 to 17)
  screenshot_names: sequence (lengths 1 to 873)
  code: string (lengths 0 to 40.4k)
  cell_type: string (1 class)
105194699/cell_22
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
df.describe().T.style
df.Potability.value_counts()
df.isnull().sum()
null_columns = pd.DataFrame(df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0], columns=['Percentage of NaN values'])
null_columns['Total NaN Values'] = df[df.columns[df.isnull().any()]].isnull().sum()
null_columns
null_cols = null_columns.index.tolist()
null_cols
sns.distplot(df.ph)
code
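The cell above stops at listing null_cols; a natural next step is imputation. A minimal sketch, assuming df and null_cols from the cell are still in scope (median imputation is an editorial choice, not from the original notebook):

for col in null_cols:
    df[col] = df[col].fillna(df[col].median())  # replace NaNs with the column median
df.isnull().sum()  # verify: all counts should now be zero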
105194699/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
code
105194699/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/water-potability/water_potability.csv')
df.shape
df.nunique()
code
90148477/cell_13
[ "text_html_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
print('intercept :', regr.intercept_)
print('coefficient :', regr.coef_)
print(x)
code
90148477/cell_23
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
df.isnull().sum()
df.notnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
f = function(x, y)
x = df[['ram']]
y = df['price_range']

def find_theta(X, y):
    m = X.shape[0]
    X = np.append(X, np.ones((m, 1)), axis=1)
    theta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))
    return theta

def predict(X):
    X = np.append(X, np.ones((X.shape[0], 1)), axis=1)
    preds = np.dot(X, theta)
    return preds

theta = find_theta(x, y)
print(theta)
preds = predict(x)
fig = plt.figure(figsize=(8, 6))
plt.plot(x, y, 'b.')
plt.plot(x, preds, 'c-')
plt.xlabel('Input')
plt.ylabel('target')
code
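find_theta above solves the normal equation, theta = (X^T X)^{-1} X^T y, by explicitly inverting X^T X. A more numerically stable route, shown here as a sketch rather than a change to the notebook, is np.linalg.lstsq, which should return the same theta on this data:

import numpy as np

def find_theta_lstsq(X, y):
    Xb = np.append(X, np.ones((X.shape[0], 1)), axis=1)  # same bias column as find_theta
    theta, *_ = np.linalg.lstsq(Xb, y, rcond=None)       # least squares without forming an explicit inverse
    return theta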
90148477/cell_20
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
df.isnull().sum()
df.notnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
f = function(x, y)
plt.scatter(x, f)
plt.plot(x, f)
plt.xlabel('X')
plt.ylabel('f(X)')
code
90148477/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
df.describe()
code
90148477/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
predicted = regr.predict([[2.0, 10, 1500, 1200, 1, 1, 2000]])
predicted = regr.predict([[2.0, 10, 1500, 1200, 1, 1, 4080]])
df.isnull().sum()
df.notnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
f = function(x, y)
x = df[['ram']]
y = df['price_range']

def find_theta(X, y):
    m = X.shape[0]
    X = np.append(X, np.ones((m, 1)), axis=1)
    theta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))
    return theta

def predict(X):
    X = np.append(X, np.ones((X.shape[0], 1)), axis=1)
    preds = np.dot(X, theta)
    return preds

theta = find_theta(x, y)
print(theta)
preds = predict(x)
fig = plt.figure(figsize=(8, 6))
plt.plot(x, y, 'b.')
plt.plot(x, preds, 'c-')
plt.xlabel('Input')
plt.ylabel('target')
x = df[['ram']]
y = df['price_range']
regr2 = linear_model.LinearRegression()
regr2.fit(x.values, y)
arr = []
index = []
for i in range(0, 4000, 1):
    predicted = regr2.predict([[i]])
    arr.append(predicted[0])
    index.append(i)
fig = plt.figure(figsize=(8, 6))
plt.plot(x, y, 'b.')
plt.plot(index, arr, 'c-')
code
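The final loop above calls regr2.predict once per integer RAM value; scikit-learn predicts on whole arrays, so the same curve can be computed in one call. A sketch, assuming regr2 and np from the cell above:

index = np.arange(0, 4000).reshape(-1, 1)  # candidate RAM values as a column vector
arr = regr2.predict(index)                 # one vectorized call instead of 4000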
90148477/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
plt.figure(figsize=(15, 10))
sns.heatmap(corr, vmax=0.5, annot=True, fmt='.2f')
plt.show()
code
90148477/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
df.isnull().sum()
df.notnull().sum()
code
90148477/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
df.hist(figsize=(20, 20))
plt.show()
code
90148477/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
df.isnull().sum()
code
90148477/cell_14
[ "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
predicted = regr.predict([[2.0, 10, 1500, 1200, 1, 1, 2000]])
print('predicted with 2.0 clock speed, 10 front camera, 1500x1200 px screen , have 3g and 4g and 2000 ram :', predicted)
predicted = regr.predict([[2.0, 10, 1500, 1200, 1, 1, 4080]])
print('predicted with 2.0 clock speed, 10 front camera, 1500x1200 px screen , have 3g and 4g and 4080 ram :', predicted)
code
90148477/cell_22
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas
import seaborn as sns

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
corr = df.corr()
x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
regr = linear_model.LinearRegression()
regr.fit(x.values, y)
df.isnull().sum()
df.notnull().sum()

def function(x, a):
    f = a[2] * x * x + a[1] * x + a[0]
    return f

def grad(x, a):
    g = 2 * a[2] * x + a[1]
    return g

x = df[['clock_speed', 'fc', 'px_height', 'px_width', 'three_g', 'four_g', 'ram']]
y = df['price_range']
f = function(x, y)
x = df[['ram']]
y = df['price_range']
plt.plot(x, y, 'r.')
code
90148477/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas

df = pandas.read_csv('../input/mobile-price-classification/train.csv')
df.head()
code
128003791/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.head()
code
128003791/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
fig = px.histogram(rating_across_state, x='Rating')
fig.show()
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Name': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
avg_vote = df.groupby(['City'])['Votes'].mean().reset_index()
fig = px.bar(avg_vote, x='City', y='Votes', labels={'City': 'City', 'Name': 'Average Number of Votes of Restaurants'}, title='Average Votes in Each City', color='City')
fig.show()
max_votes = df.groupby(['City'])['Votes'].sum().reset_index()
fig = px.bar(max_votes, x='City', y='Votes', labels={'City': 'City', 'Name': 'Number of Votes of Restaurants'}, title='Top Votes in Each City', color='City')
fig.show()
df_cuisine = df.groupby(['City', 'Cuisine'])['Name'].count().reset_index()
df_top_cuisine = df_cuisine.loc[df_cuisine.groupby('City')['Name'].idxmax()]
fig = px.bar(df_top_cuisine, x='City', y='Name', color='Cuisine', labels={'City': 'City', 'Name': 'Number of Restaurants'}, title='Top Cuisine in Each City')
fig.show()
code
128003791/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
code
128003791/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import warnings
warnings.filterwarnings('ignore')
code
128003791/cell_18
[ "text_html_output_2.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
fig = px.histogram(rating_across_state, x='Rating')
fig.show()
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Name': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
avg_vote = df.groupby(['City'])['Votes'].mean().reset_index()
fig = px.bar(avg_vote, x='City', y='Votes', labels={'City': 'City', 'Name': 'Average Number of Votes of Restaurants'}, title='Average Votes in Each City', color='City')
fig.show()
max_votes = df.groupby(['City'])['Votes'].sum().reset_index()
fig = px.bar(max_votes, x='City', y='Votes', labels={'City': 'City', 'Name': 'Number of Votes of Restaurants'}, title='Top Votes in Each City', color='City')
fig.show()
code
128003791/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.figure(figsize=(10, 8))
plt.xlabel('City')
plt.ylabel('Rating')
sns.barplot(x='City', y='Rating', data=rating_across_state)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
plt.show()
code
128003791/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
fig = px.histogram(rating_across_state, x='Rating')
fig.show()
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Name': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
avg_vote = df.groupby(['City'])['Votes'].mean().reset_index()
fig = px.bar(avg_vote, x='City', y='Votes', labels={'City': 'City', 'Name': 'Average Number of Votes of Restaurants'}, title='Average Votes in Each City', color='City')
fig.show()
code
128003791/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
fig = px.histogram(rating_across_state, x='Rating')
fig.show()
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Name': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
plt.xticks(rotation=90, fontsize=12)
sns.countplot(x=df['City'], data=df)
plt.ylabel('Count of Restaurants')
code
128003791/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
fig = px.histogram(rating_across_state, x='Rating')
fig.show()
code
128003791/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
rating_across_state = df.groupby('City').mean()
rating_across_state.reset_index(level=0, inplace=True)
plt.xticks(rotation=90, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(1, 5)
fig = px.histogram(rating_across_state, x='Rating')
fig.show()
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Name': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
code
89135385/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import os
import cv2
import pandas as pd  # was missing: pd is used below
import tensorflow as tf

PATH = '../input/rsna-bone-age/boneage-training-dataset/boneage-training-dataset/'
IMGS = os.listdir(PATH)
df = pd.read_csv('../input/rsna-bone-age/boneage-training-dataset.csv')

def _bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def serialize_example(feature0, feature1, feature2, feature3):
    feature = {'image': _bytes_feature(feature0), 'id': _bytes_feature(feature1), 'boneage': _int64_feature(feature2), 'male': _int64_feature(feature3)}
    example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return example_proto.SerializeToString()

SIZE = 841
CT = len(IMGS) // SIZE + int(len(IMGS) % SIZE != 0)
os.makedirs('bone_age_tfrecords', exist_ok=True)  # TFRecordWriter does not create the output directory
for j in range(CT):
    print()
    print('Writing TFRecord %i of %i...' % (j, CT))
    CT2 = min(SIZE, len(IMGS) - j * SIZE)
    with tf.io.TFRecordWriter('bone_age_tfrecords/train%.2i-%i.tfrec' % (j, CT2)) as writer:
        for k in range(CT2):
            img = cv2.imread(PATH + IMGS[SIZE * j + k])
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            img = cv2.imencode('.jpg', img, (cv2.IMWRITE_JPEG_QUALITY, 94))[1].tobytes()
            name = IMGS[SIZE * j + k].split('.')[0]
            row = df.loc[df['id'] == int(name)]
            example = serialize_example(img, str.encode(name), row.boneage.values[0], row.male.values[0])
            writer.write(example)
            if k % 100 == 0:
                print(k, ', ', end='')
code
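Records written by the cell above can be read back with tf.data; a minimal sketch, mirroring the feature layout in serialize_example (the glob pattern assumes the output path used above):

import tensorflow as tf

feature_description = {
    'image': tf.io.FixedLenFeature([], tf.string),
    'id': tf.io.FixedLenFeature([], tf.string),
    'boneage': tf.io.FixedLenFeature([], tf.int64),
    'male': tf.io.FixedLenFeature([], tf.int64),
}

def parse_example(serialized):
    example = tf.io.parse_single_example(serialized, feature_description)
    example['image'] = tf.io.decode_jpeg(example['image'], channels=3)  # undo the JPEG encoding
    return example

ds = tf.data.TFRecordDataset(tf.io.gfile.glob('bone_age_tfrecords/*.tfrec')).map(parse_example)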
89135385/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # was missing: the cell relies on an earlier import

df = pd.read_csv('../input/rsna-bone-age/boneage-training-dataset.csv')
df.head()
code
89135385/cell_3
[ "text_plain_output_1.png" ]
import os  # was missing: the cell relies on an earlier import

PATH = '../input/rsna-bone-age/boneage-training-dataset/boneage-training-dataset/'
IMGS = os.listdir(PATH)
print('There are %i train images' % len(IMGS))
code
32068481/cell_6
[ "image_output_1.png" ]
from collections import OrderedDict
from copy import deepcopy
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import plotly.offline as py

submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
train['day'] = train.Date.dt.dayofyear
train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]
test['Province_State'].fillna('', inplace=True)
test['Date'] = pd.to_datetime(test['Date'])
test['day'] = test.Date.dt.dayofyear
test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]
train.sort_values(by='Date', inplace=True)
test.sort_values(by='Date', inplace=True)
day_min = train['day'].min()
train['day'] -= day_min
test['day'] -= day_min
test['ConfirmedCases'] = np.nan
test['Fatalities'] = np.nan
train['ForecastId'] = np.nan
test['Id'] = np.nan
min_date_train = train['Date'].min()
min_date_test = test['Date'].min()
max_date_train = train['Date'].max()
max_date_test = test['Date'].max()
num_of_days_train = (max_date_train - min_date_train) / np.timedelta64(1, 'D') + 1
num_of_days = int((max_date_test - min_date_train) / np.timedelta64(1, 'D')) + 1
time_span0 = pd.date_range(min_date_train, max_date_test)
time_span = [str(s.month) + '/' + str(s.day) for s in time_span0]
forcast_days = int((max_date_test - max_date_train) / np.timedelta64(1, 'D'))

countries_dict = OrderedDict()
countries_dict['Afghanistan'] = ['']
countries_dict['Italy'] = ['']
countries_dict['India'] = ['']
countries_dict['Germany'] = ['']
countries_dict['Spain'] = ['']
countries_dict['Taiwan*'] = ['']
countries_dict['Japan'] = ['']
countries_dict['Singapore'] = ['']
countries_dict['Korea, South'] = ['']
countries_dict['United Kingdom'] = ['']
countries_dict['US'] = ['', 'Louisiana', 'New York', 'California', 'Minnesota']

n = 50
N_places = sum([len(value) for key, value in countries_dict.items()])
False_mask_0 = [False] * (N_places * 2 + 1)
labels = time_span[-n - 30:-30]
x = time_span0[-n - 30:-30]
data = []
manu_list = []
data.append(go.Bar(x=x, y=[0] * len(x), name='cases'))
False_mask = deepcopy(False_mask_0)
False_mask[0] = True
manu_list.append(dict(label='Select', method='update', args=[{'visible': False_mask}, {'title': 'Select country/state'}]))
n_place = -1
for country in countries_dict:
    for state in countries_dict[country]:
        sp = ' '
        if state != '':
            sp = ', '
        n_place += 1
        data_i = train[(train['Province_State'] == state) & (train['Country_Region'] == country)].sort_values(by='Date').loc[:, ['day', 'ConfirmedCases', 'Fatalities']]
        if country in ['United Kingdom', 'Canada']:
            data_i = train[train['Country_Region'] == country].groupby('Date').sum().reset_index().sort_values(by='Date').loc[:, ['day', 'ConfirmedCases', 'Fatalities']]
        if country == 'US' and state == '':
            data_i = train[train['Country_Region'] == country].groupby('Date').sum().reset_index().sort_values(by='Date').loc[:, ['day', 'ConfirmedCases', 'Fatalities']]
        cases = country + state + ' Cases_daily'
        deaths = country + state + ' deaths_daily'
        data_i[cases] = data_i['ConfirmedCases'].diff()
        data_i[deaths] = data_i['Fatalities'].diff()
        trace1 = go.Bar(x=x, y=data_i[cases][-n:], name='cases')
        trace2 = go.Bar(x=x, y=data_i[deaths][-n:], name='deaths')
        data += [trace1, trace2]
        False_mask = deepcopy(False_mask_0)
        False_mask[2 * n_place + 1:2 * n_place + 2 + 1] = [True, True]
        manu_list.append(dict(label=country + sp + state, method='update', args=[{'visible': False_mask}, {'title': country + sp + state}]))
updatemenus = [dict(active=0, buttons=manu_list, direction='down')]
layout = dict(title='Select Countries and states', yaxis=dict(title='daily count', linecolor='rgba(255,255,255, 0.8)', showgrid=True, gridcolor='rgba(255,255,255,0.2)'), xaxis=dict(title='Date', linecolor='rgba(255,255,255, 0.8)', showgrid=True, gridcolor='rgba(255,255,255,0.2)'), margin=go.Margin(l=50, r=20), paper_bgcolor='rgb(105,105,105)', plot_bgcolor='RGB(228, 235, 234)', barmode='group', font={'color': 'RGB(179, 217, 82)'}, updatemenus=updatemenus, showlegend=True)
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='relayout_option_dropdown')
code
32068481/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from datetime import timedelta
from datetime import datetime
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt
from time import time
import math
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068481/cell_8
[ "image_output_11.png", "text_plain_output_5.png", "text_plain_output_9.png", "image_output_14.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_8.png", "image_output_6.png", "image_output_12.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
train['day'] = train.Date.dt.dayofyear
train['geo'] = ['_'.join(x) for x in zip(train['Country_Region'], train['Province_State'])]
test['Province_State'].fillna('', inplace=True)
test['Date'] = pd.to_datetime(test['Date'])
test['day'] = test.Date.dt.dayofyear
test['geo'] = ['_'.join(x) for x in zip(test['Country_Region'], test['Province_State'])]
train.sort_values(by='Date', inplace=True)
test.sort_values(by='Date', inplace=True)
day_min = train['day'].min()
train['day'] -= day_min
test['day'] -= day_min
test['ConfirmedCases'] = np.nan
test['Fatalities'] = np.nan
train['ForecastId'] = np.nan
test['Id'] = np.nan
min_date_train = train['Date'].min()
min_date_test = test['Date'].min()
max_date_train = train['Date'].max()
max_date_test = test['Date'].max()
num_of_days_train = (max_date_train - min_date_train) / np.timedelta64(1, 'D') + 1
num_of_days = int((max_date_test - min_date_train) / np.timedelta64(1, 'D')) + 1
time_span0 = pd.date_range(min_date_train, max_date_test)
time_span = [str(s.month) + '/' + str(s.day) for s in time_span0]
forcast_days = int((max_date_test - max_date_train) / np.timedelta64(1, 'D'))
countries = dict()
for cnt in train['Country_Region'].unique():
    countries[cnt] = train.loc[train['Country_Region'] == cnt, 'Province_State'].unique()
countries_test = dict()
for cnt in test['Country_Region'].unique():
    countries_test[cnt] = test.loc[test['Country_Region'] == cnt, 'Province_State'].unique()
res = []
for country in countries:
    for state in countries[country]:
        if country != 'China':
            country_state_filter_train = (train['Province_State'] == state) & (train['Country_Region'] == country)
            sliced_data = train.loc[country_state_filter_train, :]
            history = sliced_data.loc[sliced_data['ConfirmedCases'] > 0, 'ConfirmedCases'].to_list()
            res.append(num_of_days_train - len(history))
aa = plt.figure()
aa = plt.hist(res, color='blue', bins=10, range=(0, 80))
aa = plt.title('first Confirmed Case histogram: # of countries/provinces(except China) .VS. days from Wuhan Lockdown(1/22/2020)')
res = []
for country in countries:
    for state in countries[country]:
        if country != 'China':
            country_state_filter_train = (train['Province_State'] == state) & (train['Country_Region'] == country)
            sliced_data = train.loc[country_state_filter_train, :]
            history = sliced_data.loc[sliced_data['Fatalities'] > 0, 'Fatalities'].to_list()
            res.append(num_of_days_train - len(history))
aa = plt.figure()
aa = plt.hist(res, color='red', bins=10, range=(0, 80))
aa = plt.title('first death histogram: # of countries/provinces(except China) .VS. days from Wuhan Lockdown(1/22/2020)')
code
32068481/cell_3
[ "image_output_2.png", "image_output_1.png" ]
import plotly.offline as py
import plotly.tools as tls
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
import cufflinks as cf
cf.set_config_file(offline=True, world_readable=True, theme='pearl')
import folium
import altair as alt
import missingno as msg
import sys
import warnings
if not sys.warnoptions:
    warnings.simplefilter('ignore')
from ipywidgets import interact, interactive, fixed
import pandas as pd
import ipywidgets as widgets
from IPython.display import display
code
121148904/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
val = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.head()
code
121148904/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
val = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test_df.isna().sum()
code
121148904/cell_11
[ "text_html_output_1.png" ]
parameters_test['Fare'].fillna(value=4, inplace=True)
code
121148904/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
val = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test_df.head()
code
121148904/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
val = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.isna().sum()
code
17133840/cell_9
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import BatchNormalization, Convolution2D, MaxPooling2D
from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.layers.noise import GaussianDropout
from keras.models import Sequential, load_model  # load_model was missing from the original imports
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
X_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
X_test = test.values.astype('float32')
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

y_train = to_categorical(y_train)
num_classes = y_train.shape[1]
X = X_train
y = y_train
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
gen = ImageDataGenerator(rotation_range=12, width_shift_range=0.16, shear_range=0.6, height_shift_range=0.16, zoom_range=0.16)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches = gen.flow(X_val, y_val, batch_size=64)

def get_bn_model(size, dropout):
    model = Sequential([Lambda(standardize, input_shape=(28, 28, 1)), Convolution2D(8 * size, (3, 3), activation='relu'), GaussianDropout(dropout), BatchNormalization(), Convolution2D(8 * size, (3, 3), activation='relu'), MaxPooling2D(), GaussianDropout(dropout), BatchNormalization(), Convolution2D(16 * size, (3, 3), activation='relu'), GaussianDropout(dropout), BatchNormalization(), Convolution2D(16 * size, (3, 3), activation='relu'), MaxPooling2D(), Flatten(), GaussianDropout(dropout), BatchNormalization(), Dense(128 * size, activation='relu'), GaussianDropout(dropout), BatchNormalization(), Dense(10, activation='softmax')])
    model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model

def augment_and_create_model(model_size, dropout_rate, aug):
    model = get_bn_model(model_size, dropout_rate)
    gen = ImageDataGenerator(rotation_range=3 * aug, width_shift_range=0.04 * aug, shear_range=0.15 * aug, height_shift_range=0.04 * aug, zoom_range=0.04 * aug)
    return (model, gen.flow(X_train, y_train, batch_size=64), gen.flow(X_val, y_val, batch_size=64))

models = []
epochs_to_train = 2
for x in range(1, 5):
    aug = x
    model_size = x
    dropout_rate = x * 0.1
    pretrained = 0
    for pretrained_epochs in range(epochs_to_train, 0, -1):
        try:
            model = load_model('model_size_{}_dropout_{}_augment_{}_epochs_{}'.format(x, 0.1 * x, x, pretrained_epochs))
            pretrained = pretrained_epochs
            break
        except:
            pass
    if pretrained == 0:
        this_model = get_bn_model(model_size, dropout_rate)
    else:
        this_model = model
    gen = ImageDataGenerator(rotation_range=3 * aug, width_shift_range=0.04 * aug, shear_range=0.15 * aug, height_shift_range=0.04 * aug, zoom_range=0.04 * aug)
    batches, val_batches = (gen.flow(X_train, y_train, batch_size=64), gen.flow(X_val, y_val, batch_size=64))
    models.append(this_model)
    models[-1].optimizer.lr = 0.01
    history = models[-1].fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=epochs_to_train - pretrained, validation_data=val_batches, validation_steps=val_batches.n)
    this_model.save('model_size_{}_dropout_{}_augment_{}_epochs_{}.h5'.format(x, 0.1 * x, x, epochs_to_train), include_optimizer=False)

best_loss = 0
best_loss_index = -1
for i in range(len(models)):
    model = models[i]
    print(model.history.history)
    # score = final validation loss plus the squared train/validation gap
    this_loss = model.history.history['val_loss'][-1] + (model.history.history['val_loss'][-1] - model.history.history['loss'][-1]) ** 2
    # note: '>' keeps the model with the LARGEST combined loss; selecting the
    # smallest would need '<' with best_loss initialized to float('inf')
    if this_loss > best_loss:
        best_loss = this_loss
        best_loss_index = i
code
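The try/except resume loop above re-implements checkpointing by hand; Keras's ModelCheckpoint callback covers the same ground. A sketch, not the author's approach (the filename pattern is illustrative):

from keras.callbacks import ModelCheckpoint

# save the full model after every epoch so a later run can resume with load_model
checkpoint = ModelCheckpoint('model_epoch_{epoch:02d}.h5')
# usage: pass callbacks=[checkpoint] to fit_generator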
17133840/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.optimizers import Adam, RMSprop
from sklearn.model_selection import train_test_split
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator

print(check_output(['ls', '../input']).decode('utf8'))
code
17133840/cell_7
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import BatchNormalization, Convolution2D, MaxPooling2D
from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.layers.noise import GaussianDropout
from keras.models import Sequential, load_model  # load_model was missing from the original imports
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
X_train = train.iloc[:, 1:].values.astype('float32')
y_train = train.iloc[:, 0].values.astype('int32')
X_test = test.values.astype('float32')
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)

def standardize(x):
    return (x - mean_px) / std_px

y_train = to_categorical(y_train)
num_classes = y_train.shape[1]
X = X_train
y = y_train
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42)
gen = ImageDataGenerator(rotation_range=12, width_shift_range=0.16, shear_range=0.6, height_shift_range=0.16, zoom_range=0.16)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches = gen.flow(X_val, y_val, batch_size=64)

def get_bn_model(size, dropout):
    model = Sequential([Lambda(standardize, input_shape=(28, 28, 1)), Convolution2D(8 * size, (3, 3), activation='relu'), GaussianDropout(dropout), BatchNormalization(), Convolution2D(8 * size, (3, 3), activation='relu'), MaxPooling2D(), GaussianDropout(dropout), BatchNormalization(), Convolution2D(16 * size, (3, 3), activation='relu'), GaussianDropout(dropout), BatchNormalization(), Convolution2D(16 * size, (3, 3), activation='relu'), MaxPooling2D(), Flatten(), GaussianDropout(dropout), BatchNormalization(), Dense(128 * size, activation='relu'), GaussianDropout(dropout), BatchNormalization(), Dense(10, activation='softmax')])
    model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model

def augment_and_create_model(model_size, dropout_rate, aug):
    model = get_bn_model(model_size, dropout_rate)
    gen = ImageDataGenerator(rotation_range=3 * aug, width_shift_range=0.04 * aug, shear_range=0.15 * aug, height_shift_range=0.04 * aug, zoom_range=0.04 * aug)
    return (model, gen.flow(X_train, y_train, batch_size=64), gen.flow(X_val, y_val, batch_size=64))

models = []
epochs_to_train = 2
for x in range(1, 5):
    aug = x
    model_size = x
    dropout_rate = x * 0.1
    print('Training a model with size level {}, dropout rate {}, and augmentation level {}'.format(model_size, dropout_rate, aug))
    pretrained = 0
    print('Attempting to load saved model.')
    for pretrained_epochs in range(epochs_to_train, 0, -1):
        try:
            model = load_model('model_size_{}_dropout_{}_augment_{}_epochs_{}'.format(x, 0.1 * x, x, pretrained_epochs))
            print('Loaded existed model trained for {} epochs out of {}'.format(pretrained_epochs, epochs_to_train))
            pretrained = pretrained_epochs
            break
        except:
            pass
    if pretrained == 0:
        print('Failed to load trained model. Creating new model.')
        this_model = get_bn_model(model_size, dropout_rate)
    else:
        this_model = model
    gen = ImageDataGenerator(rotation_range=3 * aug, width_shift_range=0.04 * aug, shear_range=0.15 * aug, height_shift_range=0.04 * aug, zoom_range=0.04 * aug)
    batches, val_batches = (gen.flow(X_train, y_train, batch_size=64), gen.flow(X_val, y_val, batch_size=64))
    models.append(this_model)
    models[-1].optimizer.lr = 0.01
    history = models[-1].fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=epochs_to_train - pretrained, validation_data=val_batches, validation_steps=val_batches.n)
    this_model.save('model_size_{}_dropout_{}_augment_{}_epochs_{}.h5'.format(x, 0.1 * x, x, epochs_to_train), include_optimizer=False)
code
89132099/cell_13
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
quality_pivot
code
89132099/cell_9
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
code
89132099/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
code
89132099/cell_6
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
train.SalePrice.describe()
code
89132099/cell_11
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
train.OverallQual.unique()
code
89132099/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89132099/cell_7
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
print('Skew is:', train.SalePrice.skew())
plt.hist(train.SalePrice, color='blue')
plt.show()
code
89132099/cell_18
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
plt.xticks(rotation=0)
train = train[train['GarageArea'] < 1200]
plt.scatter(x=train['GarageArea'], y=np.log(train.SalePrice))
plt.xlim(-200, 1600)
plt.ylabel('Sale Price')
plt.xlabel('Garage Area')
plt.show()
code
89132099/cell_8
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
print('Skew is:', target.skew())
plt.hist(target, color='blue')
plt.show()
code
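Since the model target here is np.log(SalePrice), predictions come out on the log scale and need the inverse transform before being reported in dollars; a one-line sketch, where preds_log is a hypothetical array of model outputs:

preds_dollars = np.exp(preds_log)  # inverse of np.log; use np.expm1 if np.log1p was applied instead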
89132099/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
plt.xticks(rotation=0)
plt.scatter(x=train['GrLivArea'], y=target)
plt.ylabel('Sale Price')
plt.xlabel('Above grade (ground) living area square feet')
plt.show()
code
89132099/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
plt.xticks(rotation=0)
plt.scatter(x=train['GarageArea'], y=target)
plt.ylabel('Sale Price')
plt.xlabel('Garage Area')
plt.show()
code
89132099/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.head()
code
89132099/cell_14
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
quality_pivot.plot(kind='bar', color='blue')
plt.xlabel('Overall Quality')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
code
89132099/cell_10
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
import seaborn as sns

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
target = np.log(train.SalePrice)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
corr = numeric_features.corr()
print(corr['SalePrice'].sort_values(ascending=False)[:5], '\n')
print(corr['SalePrice'].sort_values(ascending=False)[-5:])
code
89132099/cell_5
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.shape
code
17115900/cell_13
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import os
import zipfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from PIL import Image
from collections import defaultdict
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm_notebook

ComputeLB = True
DogsOnly = True
ROOT = '../input/generative-dog-images/'
if not ComputeLB:
    ROOT = '../input/'
IMAGES = os.listdir(ROOT + 'all-dogs/all-dogs/')
breeds = os.listdir(ROOT + 'annotation/Annotation/')
idxIn = 0
namesIn = []
imagesIn = np.zeros((25000, 64, 64, 3))
if DogsOnly:
    # square crop around each annotated bounding box, resized to 64x64
    for breed in tqdm_notebook(breeds):
        for dog in os.listdir(ROOT + 'annotation/Annotation/' + breed):
            try:
                img = Image.open(ROOT + 'all-dogs/all-dogs/' + dog + '.jpg')
            except:
                continue
            tree = ET.parse(ROOT + 'annotation/Annotation/' + breed + '/' + dog)
            root = tree.getroot()
            objects = root.findall('object')
            for o in objects:
                bndbox = o.find('bndbox')
                xmin = int(bndbox.find('xmin').text)
                ymin = int(bndbox.find('ymin').text)
                xmax = int(bndbox.find('xmax').text)
                ymax = int(bndbox.find('ymax').text)
                w_, h_ = img.size
                w = np.max((xmax - xmin, ymax - ymin))
                img2 = img.crop((xmin, ymin, min(xmin + w, w_), min(ymin + w, h_)))
                img2 = img2.resize((64, 64), Image.ANTIALIAS)
                imagesIn[idxIn, :, :, :] = np.asarray(img2)
                namesIn.append(breed)
                idxIn += 1
    idx = np.arange(idxIn)
    np.random.shuffle(idx)
    imagesIn = imagesIn[idx, :, :, :]
    namesIn = np.array(namesIn)[idx]
else:
    # center square crops from a random sample of the raw images
    x = np.random.choice(np.arange(20579), 10000)
    for k in tqdm_notebook(range(len(x))):
        img = Image.open(ROOT + 'all-dogs/all-dogs/' + IMAGES[x[k]])
        w = img.size[0]
        h = img.size[1]
        sz = np.min((w, h))
        a = 0
        b = 0
        if w < h:
            b = (h - sz) // 2
        else:
            a = (w - sz) // 2
        img = img.crop((0 + a, 0 + b, sz + a, sz + b))
        img = img.resize((64, 64), Image.ANTIALIAS)
        imagesIn[idxIn, :, :, :] = np.asarray(img)
        namesIn.append(IMAGES[x[k]])
        idxIn += 1
x = np.random.randint(0, idxIn, 25)
for k in range(5):
    for j in range(5):
        img = Image.fromarray(imagesIn[x[k * 5 + j], :, :, :].astype('uint8'))
        plt.axis('off')
imagesIntorch = np.array([np.array(image).transpose(2, 1, 0) for image in imagesIn])  # HWC -> CHW for PyTorch
dogs = list(set(namesIn))
len_dogs = len(dogs)
dog2id = {dogs[i]: i for i in range(len(dogs))}
id2dog = {v: k for k, v in dog2id.items()}
idIn = [dog2id[name] for name in namesIn]
train_X, validation_X, train_y, validation_y = train_test_split(imagesIntorch, idIn, test_size=0.2, random_state=620402)
(np.array(train_X).shape, np.array(validation_X).shape, np.array(train_y).shape, np.array(validation_y).shape)
data_variance = np.var(train_X)
data_variance
code
17115900/cell_9
[ "text_plain_output_1.png" ]
import os
import zipfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from PIL import Image
from collections import defaultdict
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm_notebook

ComputeLB = True
DogsOnly = True
ROOT = '../input/generative-dog-images/'
if not ComputeLB:
    ROOT = '../input/'
IMAGES = os.listdir(ROOT + 'all-dogs/all-dogs/')
breeds = os.listdir(ROOT + 'annotation/Annotation/')
idxIn = 0
namesIn = []
imagesIn = np.zeros((25000, 64, 64, 3))
if DogsOnly:
    # square crop around each annotated bounding box, resized to 64x64
    for breed in tqdm_notebook(breeds):
        for dog in os.listdir(ROOT + 'annotation/Annotation/' + breed):
            try:
                img = Image.open(ROOT + 'all-dogs/all-dogs/' + dog + '.jpg')
            except:
                continue
            tree = ET.parse(ROOT + 'annotation/Annotation/' + breed + '/' + dog)
            root = tree.getroot()
            objects = root.findall('object')
            for o in objects:
                bndbox = o.find('bndbox')
                xmin = int(bndbox.find('xmin').text)
                ymin = int(bndbox.find('ymin').text)
                xmax = int(bndbox.find('xmax').text)
                ymax = int(bndbox.find('ymax').text)
                w_, h_ = img.size
                w = np.max((xmax - xmin, ymax - ymin))
                img2 = img.crop((xmin, ymin, min(xmin + w, w_), min(ymin + w, h_)))
                img2 = img2.resize((64, 64), Image.ANTIALIAS)
                imagesIn[idxIn, :, :, :] = np.asarray(img2)
                namesIn.append(breed)
                idxIn += 1
    idx = np.arange(idxIn)
    np.random.shuffle(idx)
    imagesIn = imagesIn[idx, :, :, :]
    namesIn = np.array(namesIn)[idx]
else:
    # center square crops from a random sample of the raw images
    x = np.random.choice(np.arange(20579), 10000)
    for k in tqdm_notebook(range(len(x))):
        img = Image.open(ROOT + 'all-dogs/all-dogs/' + IMAGES[x[k]])
        w = img.size[0]
        h = img.size[1]
        sz = np.min((w, h))
        a = 0
        b = 0
        if w < h:
            b = (h - sz) // 2
        else:
            a = (w - sz) // 2
        img = img.crop((0 + a, 0 + b, sz + a, sz + b))
        img = img.resize((64, 64), Image.ANTIALIAS)
        imagesIn[idxIn, :, :, :] = np.asarray(img)
        namesIn.append(IMAGES[x[k]])
        idxIn += 1
x = np.random.randint(0, idxIn, 25)
for k in range(5):
    for j in range(5):
        img = Image.fromarray(imagesIn[x[k * 5 + j], :, :, :].astype('uint8'))
        plt.axis('off')
print(f'The shape of image is {imagesIn.shape}, the shape of imagename is {namesIn.shape}')
imagesIntorch = np.array([np.array(image).transpose(2, 1, 0) for image in imagesIn])  # HWC -> CHW for PyTorch
print(f'The shape of reshaped image is {imagesIntorch.shape}')
dogs = list(set(namesIn))
len_dogs = len(dogs)
print(f'the number of dogs is {len_dogs}')
dog2id = {dogs[i]: i for i in range(len(dogs))}
id2dog = {v: k for k, v in dog2id.items()}
idIn = [dog2id[name] for name in namesIn]
train_X, validation_X, train_y, validation_y = train_test_split(imagesIntorch, idIn, test_size=0.2, random_state=620402)
code
17115900/cell_6
[ "text_plain_output_1.png" ]
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
code
17115900/cell_8
[ "text_plain_output_1.png" ]
import os
import zipfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from PIL import Image
from tqdm import tqdm_notebook

ComputeLB = True
DogsOnly = True
ROOT = '../input/generative-dog-images/'
if not ComputeLB:
    ROOT = '../input/'
IMAGES = os.listdir(ROOT + 'all-dogs/all-dogs/')
breeds = os.listdir(ROOT + 'annotation/Annotation/')
idxIn = 0
namesIn = []
imagesIn = np.zeros((25000, 64, 64, 3))
if DogsOnly:
    # square crop around each annotated bounding box, resized to 64x64
    for breed in tqdm_notebook(breeds):
        for dog in os.listdir(ROOT + 'annotation/Annotation/' + breed):
            try:
                img = Image.open(ROOT + 'all-dogs/all-dogs/' + dog + '.jpg')
            except:
                continue
            tree = ET.parse(ROOT + 'annotation/Annotation/' + breed + '/' + dog)
            root = tree.getroot()
            objects = root.findall('object')
            for o in objects:
                bndbox = o.find('bndbox')
                xmin = int(bndbox.find('xmin').text)
                ymin = int(bndbox.find('ymin').text)
                xmax = int(bndbox.find('xmax').text)
                ymax = int(bndbox.find('ymax').text)
                w_, h_ = img.size
                w = np.max((xmax - xmin, ymax - ymin))
                img2 = img.crop((xmin, ymin, min(xmin + w, w_), min(ymin + w, h_)))
                img2 = img2.resize((64, 64), Image.ANTIALIAS)
                imagesIn[idxIn, :, :, :] = np.asarray(img2)
                namesIn.append(breed)
                idxIn += 1
    idx = np.arange(idxIn)
    np.random.shuffle(idx)
    imagesIn = imagesIn[idx, :, :, :]
    namesIn = np.array(namesIn)[idx]
else:
    # center square crops from a random sample of the raw images
    x = np.random.choice(np.arange(20579), 10000)
    for k in tqdm_notebook(range(len(x))):
        img = Image.open(ROOT + 'all-dogs/all-dogs/' + IMAGES[x[k]])
        w = img.size[0]
        h = img.size[1]
        sz = np.min((w, h))
        a = 0
        b = 0
        if w < h:
            b = (h - sz) // 2
        else:
            a = (w - sz) // 2
        img = img.crop((0 + a, 0 + b, sz + a, sz + b))
        img = img.resize((64, 64), Image.ANTIALIAS)
        imagesIn[idxIn, :, :, :] = np.asarray(img)
        namesIn.append(IMAGES[x[k]])
        if idxIn % 1000 == 0:
            print(idxIn)
        idxIn += 1
# show a 5x5 grid of random crops
x = np.random.randint(0, idxIn, 25)
for k in range(5):
    plt.figure(figsize=(15, 3))
    for j in range(5):
        plt.subplot(1, 5, j + 1)
        img = Image.fromarray(imagesIn[x[k * 5 + j], :, :, :].astype('uint8'))
        plt.axis('off')
        if not DogsOnly:
            plt.title(namesIn[x[k * 5 + j]], fontsize=11)
        else:
            plt.title(namesIn[x[k * 5 + j]].split('-')[1], fontsize=11)
        plt.imshow(img)
    plt.show()
code
17115900/cell_16
[ "text_plain_output_1.png" ]
from PIL import Image from sklearn.model_selection import train_test_split from torch.utils.data import TensorDataset, DataLoader from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as np import torch import torch import torch.nn as nn import xml.etree.ElementTree as ET device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device ComputeLB = True DogsOnly = True import numpy as np, pandas as pd, os import xml.etree.ElementTree as ET import matplotlib.pyplot as plt, zipfile from PIL import Image from tqdm import tqdm_notebook ROOT = '../input/generative-dog-images/' if not ComputeLB: ROOT = '../input/' IMAGES = os.listdir(ROOT + 'all-dogs/all-dogs/') breeds = os.listdir(ROOT + 'annotation/Annotation/') idxIn = 0 namesIn = [] imagesIn = np.zeros((25000, 64, 64, 3)) if DogsOnly: for breed in tqdm_notebook(breeds): for dog in os.listdir(ROOT + 'annotation/Annotation/' + breed): try: img = Image.open(ROOT + 'all-dogs/all-dogs/' + dog + '.jpg') except: continue tree = ET.parse(ROOT + 'annotation/Annotation/' + breed + '/' + dog) root = tree.getroot() objects = root.findall('object') for o in objects: bndbox = o.find('bndbox') xmin = int(bndbox.find('xmin').text) ymin = int(bndbox.find('ymin').text) xmax = int(bndbox.find('xmax').text) ymax = int(bndbox.find('ymax').text) w_, h_ = img.size w = np.max((xmax - xmin, ymax - ymin)) img2 = img.crop((xmin, ymin, min(xmin + w, w_), min(ymin + w, h_))) img2 = img2.resize((64, 64), Image.ANTIALIAS) imagesIn[idxIn, :, :, :] = np.asarray(img2) namesIn.append(breed) idxIn += 1 idx = np.arange(idxIn) np.random.shuffle(idx) imagesIn = imagesIn[idx, :, :, :] namesIn = np.array(namesIn)[idx] else: x = np.random.choice(np.arange(20579), 10000) for k in tqdm_notebook(range(len(x))): img = Image.open(ROOT + 'all-dogs/all-dogs/' + IMAGES[x[k]]) w = img.size[0] h = img.size[1] sz = np.min((w, h)) a = 0 b = 0 if w < h: b = (h - sz) // 2 else: a = (w - sz) // 2 img = img.crop((0 + a, 0 + b, sz + a, sz + b)) img = img.resize((64, 64), Image.ANTIALIAS) imagesIn[idxIn, :, :, :] = np.asarray(img) namesIn.append(IMAGES[x[k]]) idxIn += 1 x = np.random.randint(0, idxIn, 25) for k in range(5): for j in range(5): img = Image.fromarray(imagesIn[x[k * 5 + j], :, :, :].astype('uint8')) plt.axis('off') from torch.utils.data import TensorDataset, DataLoader from collections import defaultdict from sklearn.model_selection import train_test_split imagesIntorch = np.array([np.array(image).transpose(2, 1, 0) for image in imagesIn]) dogs = list(set(namesIn)) len_dogs = len(dogs) dog2id = {dogs[i]: i for i in range(len(dogs))} id2dog = {v: k for k, v in dog2id.items()} idIn = [dog2id[name] for name in namesIn] train_X, validation_X, train_y, validation_y = train_test_split(imagesIntorch, idIn, test_size=0.2, random_state=620402) (np.array(train_X).shape, np.array(validation_X).shape, np.array(train_y).shape, np.array(validation_y).shape) import torch training_data = TensorDataset(torch.Tensor(train_X), torch.Tensor(train_y)) validation_data = TensorDataset(torch.Tensor(validation_X), torch.Tensor(validation_y)) data_variance = np.var(train_X) data_variance class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost): super(VectorQuantizer, self).__init__() self._embedding_dim = embedding_dim self._num_embeddings = num_embeddings self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim) self._embedding.weight.data.uniform_(-1 / self._num_embeddings, 1 / self._num_embeddings) 
self._commitment_cost = commitment_cost def forward(self, inputs): inputs = inputs.permute(0, 2, 3, 1).contiguous() input_shape = inputs.shape flat_input = inputs.view(-1, self._embedding_dim) distances = torch.sum(flat_input ** 2, dim=1, keepdim=True) + torch.sum(self._embedding.weight ** 2, dim=1) - 2 * torch.matmul(flat_input, self._embedding.weight.t()) encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1) encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings).to(device) encodings.scatter_(1, encoding_indices, 1) quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape) e_latent_loss = torch.mean((quantized.detach() - inputs) ** 2) q_latent_loss = torch.mean((quantized - inputs.detach()) ** 2) loss = q_latent_loss + self._commitment_cost * e_latent_loss quantized = inputs + (quantized - inputs).detach() avg_probs = torch.mean(encodings, dim=0) perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) return (loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings) num_embeddings, embedding_dim, commitment_cost = (64, 512, 0.25) testVectorQuantizer = VectorQuantizer(num_embeddings, embedding_dim, commitment_cost) testVectorQuantizer.cuda() input_tensor = torch.Tensor(np.random.normal(size=[32, 64, 4, 4])) print(input_tensor.shape) _, output_tensor, perplexity, encodings = testVectorQuantizer(input_tensor.cuda()) print(output_tensor.shape) print(encodings.shape) print(perplexity.shape)
code
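The one-hot scatter is the step that turns the argmin codebook indices into embeddings; without it the zeros tensor stays zero and the quantized output collapses. A minimal self-contained sketch of the lookup, with illustrative sizes rather than the notebook's:

import torch

# toy codebook: 8 codes of dimension 4 (illustrative sizes, not the notebook's)
codebook = torch.randn(8, 4)
flat_input = torch.randn(16, 4)

# squared Euclidean distance from every input row to every code
distances = (flat_input.pow(2).sum(1, keepdim=True)
             + codebook.pow(2).sum(1)
             - 2 * flat_input @ codebook.t())

indices = distances.argmin(1, keepdim=True)           # [16, 1]
one_hot = torch.zeros(16, 8).scatter_(1, indices, 1)  # [16, 8] one-hot rows
quantized = one_hot @ codebook                        # nearest code per input

# straight-through estimator: forward uses the quantized values,
# backward passes gradients through to flat_input unchanged
quantized = flat_input + (quantized - flat_input).detach()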
17115900/cell_17
[ "text_plain_output_1.png" ]
from PIL import Image from sklearn.model_selection import train_test_split from torch.utils.data import TensorDataset, DataLoader from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as np import torch import torch import torch.nn as nn import xml.etree.ElementTree as ET device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device ComputeLB = True DogsOnly = True import numpy as np, pandas as pd, os import xml.etree.ElementTree as ET import matplotlib.pyplot as plt, zipfile from PIL import Image from tqdm import tqdm_notebook ROOT = '../input/generative-dog-images/' if not ComputeLB: ROOT = '../input/' IMAGES = os.listdir(ROOT + 'all-dogs/all-dogs/') breeds = os.listdir(ROOT + 'annotation/Annotation/') idxIn = 0 namesIn = [] imagesIn = np.zeros((25000, 64, 64, 3)) if DogsOnly: for breed in tqdm_notebook(breeds): for dog in os.listdir(ROOT + 'annotation/Annotation/' + breed): try: img = Image.open(ROOT + 'all-dogs/all-dogs/' + dog + '.jpg') except: continue tree = ET.parse(ROOT + 'annotation/Annotation/' + breed + '/' + dog) root = tree.getroot() objects = root.findall('object') for o in objects: bndbox = o.find('bndbox') xmin = int(bndbox.find('xmin').text) ymin = int(bndbox.find('ymin').text) xmax = int(bndbox.find('xmax').text) ymax = int(bndbox.find('ymax').text) w_, h_ = img.size w = np.max((xmax - xmin, ymax - ymin)) img2 = img.crop((xmin, ymin, min(xmin + w, w_), min(ymin + w, h_))) img2 = img2.resize((64, 64), Image.ANTIALIAS) imagesIn[idxIn, :, :, :] = np.asarray(img2) namesIn.append(breed) idxIn += 1 idx = np.arange(idxIn) np.random.shuffle(idx) imagesIn = imagesIn[idx, :, :, :] namesIn = np.array(namesIn)[idx] else: x = np.random.choice(np.arange(20579), 10000) for k in tqdm_notebook(range(len(x))): img = Image.open(ROOT + 'all-dogs/all-dogs/' + IMAGES[x[k]]) w = img.size[0] h = img.size[1] sz = np.min((w, h)) a = 0 b = 0 if w < h: b = (h - sz) // 2 else: a = (w - sz) // 2 img = img.crop((0 + a, 0 + b, sz + a, sz + b)) img = img.resize((64, 64), Image.ANTIALIAS) imagesIn[idxIn, :, :, :] = np.asarray(img) namesIn.append(IMAGES[x[k]]) idxIn += 1 x = np.random.randint(0, idxIn, 25) for k in range(5): for j in range(5): img = Image.fromarray(imagesIn[x[k * 5 + j], :, :, :].astype('uint8')) plt.axis('off') from torch.utils.data import TensorDataset, DataLoader from collections import defaultdict from sklearn.model_selection import train_test_split imagesIntorch = np.array([np.array(image).transpose(2, 1, 0) for image in imagesIn]) dogs = list(set(namesIn)) len_dogs = len(dogs) dog2id = {dogs[i]: i for i in range(len(dogs))} id2dog = {v: k for k, v in dog2id.items()} idIn = [dog2id[name] for name in namesIn] train_X, validation_X, train_y, validation_y = train_test_split(imagesIntorch, idIn, test_size=0.2, random_state=620402) (np.array(train_X).shape, np.array(validation_X).shape, np.array(train_y).shape, np.array(validation_y).shape) import torch training_data = TensorDataset(torch.Tensor(train_X), torch.Tensor(train_y)) validation_data = TensorDataset(torch.Tensor(validation_X), torch.Tensor(validation_y)) data_variance = np.var(train_X) data_variance class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim, commitment_cost): super(VectorQuantizer, self).__init__() self._embedding_dim = embedding_dim self._num_embeddings = num_embeddings self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim) self._embedding.weight.data.uniform_(-1 / self._num_embeddings, 1 / self._num_embeddings) 
self._commitment_cost = commitment_cost def forward(self, inputs): inputs = inputs.permute(0, 2, 3, 1).contiguous() input_shape = inputs.shape flat_input = inputs.view(-1, self._embedding_dim) distances = torch.sum(flat_input ** 2, dim=1, keepdim=True) + torch.sum(self._embedding.weight ** 2, dim=1) - 2 * torch.matmul(flat_input, self._embedding.weight.t()) encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1) encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings).to(device) encodings.scatter_(1, encoding_indices, 1) quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape) e_latent_loss = torch.mean((quantized.detach() - inputs) ** 2) q_latent_loss = torch.mean((quantized - inputs.detach()) ** 2) loss = q_latent_loss + self._commitment_cost * e_latent_loss quantized = inputs + (quantized - inputs).detach() avg_probs = torch.mean(encodings, dim=0) perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) return (loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings) num_embeddings, embedding_dim, commitment_cost = (64, 512, 0.25) testVectorQuantizer = VectorQuantizer(num_embeddings, embedding_dim, commitment_cost) testVectorQuantizer.cuda() input_tensor = torch.Tensor(np.random.normal(size=[32, 64, 4, 4])) _, output_tensor, perplexity, encodings = testVectorQuantizer(input_tensor.cuda()) (encodings, perplexity)
code
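The perplexity the quantizer returns is exp of the entropy of the average one-hot assignments — a gauge of how much of the codebook is actually in use, from 1 (a single code) up to the number of embeddings (uniform use). A toy check with illustrative sizes:

import torch

# 100 toy assignments over a codebook of 8 entries
one_hot = torch.zeros(100, 8)
one_hot[torch.arange(100), torch.randint(0, 8, (100,))] = 1

avg_probs = one_hot.mean(0)  # empirical code-usage distribution
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
print(perplexity)  # close to 8 when usage is near-uniform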
17115900/cell_10
[ "text_plain_output_1.png" ]
from PIL import Image from sklearn.model_selection import train_test_split from tqdm import tqdm_notebook import matplotlib.pyplot as plt import numpy as np import xml.etree.ElementTree as ET ComputeLB = True DogsOnly = True import numpy as np, pandas as pd, os import xml.etree.ElementTree as ET import matplotlib.pyplot as plt, zipfile from PIL import Image from tqdm import tqdm_notebook ROOT = '../input/generative-dog-images/' if not ComputeLB: ROOT = '../input/' IMAGES = os.listdir(ROOT + 'all-dogs/all-dogs/') breeds = os.listdir(ROOT + 'annotation/Annotation/') idxIn = 0 namesIn = [] imagesIn = np.zeros((25000, 64, 64, 3)) if DogsOnly: for breed in tqdm_notebook(breeds): for dog in os.listdir(ROOT + 'annotation/Annotation/' + breed): try: img = Image.open(ROOT + 'all-dogs/all-dogs/' + dog + '.jpg') except: continue tree = ET.parse(ROOT + 'annotation/Annotation/' + breed + '/' + dog) root = tree.getroot() objects = root.findall('object') for o in objects: bndbox = o.find('bndbox') xmin = int(bndbox.find('xmin').text) ymin = int(bndbox.find('ymin').text) xmax = int(bndbox.find('xmax').text) ymax = int(bndbox.find('ymax').text) w_, h_ = img.size w = np.max((xmax - xmin, ymax - ymin)) img2 = img.crop((xmin, ymin, min(xmin + w, w_), min(ymin + w, h_))) img2 = img2.resize((64, 64), Image.ANTIALIAS) imagesIn[idxIn, :, :, :] = np.asarray(img2) namesIn.append(breed) idxIn += 1 idx = np.arange(idxIn) np.random.shuffle(idx) imagesIn = imagesIn[idx, :, :, :] namesIn = np.array(namesIn)[idx] else: x = np.random.choice(np.arange(20579), 10000) for k in tqdm_notebook(range(len(x))): img = Image.open(ROOT + 'all-dogs/all-dogs/' + IMAGES[x[k]]) w = img.size[0] h = img.size[1] sz = np.min((w, h)) a = 0 b = 0 if w < h: b = (h - sz) // 2 else: a = (w - sz) // 2 img = img.crop((0 + a, 0 + b, sz + a, sz + b)) img = img.resize((64, 64), Image.ANTIALIAS) imagesIn[idxIn, :, :, :] = np.asarray(img) namesIn.append(IMAGES[x[k]]) idxIn += 1 x = np.random.randint(0, idxIn, 25) for k in range(5): for j in range(5): img = Image.fromarray(imagesIn[x[k * 5 + j], :, :, :].astype('uint8')) plt.axis('off') from torch.utils.data import TensorDataset, DataLoader from collections import defaultdict from sklearn.model_selection import train_test_split imagesIntorch = np.array([np.array(image).transpose(2, 1, 0) for image in imagesIn]) dogs = list(set(namesIn)) len_dogs = len(dogs) dog2id = {dogs[i]: i for i in range(len(dogs))} id2dog = {v: k for k, v in dog2id.items()} idIn = [dog2id[name] for name in namesIn] train_X, validation_X, train_y, validation_y = train_test_split(imagesIntorch, idIn, test_size=0.2, random_state=620402) (np.array(train_X).shape, np.array(validation_X).shape, np.array(train_y).shape, np.array(validation_y).shape)
code
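The split above shuffles dogs without regard to breed, so rare breeds can end up unevenly divided; train_test_split can stratify on the labels if balance matters. A sketch with toy arrays standing in for the image tensors:

import numpy as np
from sklearn.model_selection import train_test_split

X = np.random.rand(100, 3)              # toy features
y = np.random.randint(0, 4, size=100)   # toy breed ids

# stratify=y keeps each breed's proportion identical in both splits
X_tr, X_va, y_tr, y_va = train_test_split(
    X, y, test_size=0.2, random_state=620402, stratify=y)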
128032351/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv') df = df.dropna() df.describe(include='O').T df.sample(2)
code
128032351/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv') df = df.dropna() df.describe(include='O').T df.sample(2) df = df.drop('Unnamed: 0', axis=1) df.sample(2)
code
128032351/cell_3
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv') df.info()
code
128032351/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv') df = df.dropna() df.describe(include='O').T df.sample(2) df = df.drop('Unnamed: 0', axis=1) df.sample(2) df.groupby('Hotel_name').sum()['Rating'].sort_values(ascending=False)[:5]
code
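Summing Rating, as above, rewards hotels that simply have more rows; splitting the aggregate into a mean and a count separates rating level from listing volume. A sketch over the same cleaned frame:

# assumes df is the cleaned OYO frame loaded above
ranked = (df.groupby('Hotel_name')['Rating']
            .agg(mean_rating='mean', n_rows='count')
            .sort_values('mean_rating', ascending=False))
ranked.head(5)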
128032351/cell_12
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv') df = df.dropna() df.describe(include='O').T df.sample(2) df = df.drop('Unnamed: 0', axis=1) df.sample(2) df.groupby('Hotel_name').sum()['Rating'].sort_values(ascending=False)[:5] df.groupby('Hotel_name').sum()['Price'].sort_values(ascending=False)[:5]
code
128032351/cell_5
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/oyo-hotel-rooms/OYO_HOTEL_ROOMS.csv') df = df.dropna() df.describe(include='O').T
code
130017723/cell_12
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression import numpy as np reg = LinearRegression() model = reg.fit(x_train, y_train) y_pred = model.predict(x_test).round() def diabetes_prediction(): preg = int(input('Enter the preg value:')) glu = int(input('Enter the glu value:')) BP = int(input('Enter the BP value:')) SkinThik = int(input('Enter the SkinThik value:')) Insulin = int(input('Enter the Insulin value:')) Age = int(input('Enter the Age value:')) Value_testing = np.array([preg, glu, BP, SkinThik, Insulin, Age]).reshape(-1, 6) prediction = model.predict(Value_testing).round() return prediction diabetes_prediction()
code
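Rounding LinearRegression output to 0/1, as the cell does, is an unusual stand-in for classification; LogisticRegression is the conventional drop-in and yields probabilities as well. A sketch assuming the same x_train/y_train/x_test splits the cell relies on:

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(max_iter=1000)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)             # already 0/1, no rounding needed
probs = clf.predict_proba(x_test)[:, 1]  # probability of the positive class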
18149171/cell_13
[ "text_plain_output_1.png" ]
from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('../input/house-votes.csv', na_values=['?']) df.replace('^y$', value=1, regex=True, inplace=True) df.replace('^n$', value=0, regex=True, inplace=True) df.fillna(0, inplace=True) df.to_csv('house-votes-edited.csv') from sklearn.neighbors import KNeighborsClassifier y = df['party'].values X = df.drop('party', axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y) X_new = np.array([[0.44764519, 0.95034062, 0.43959532, 0.80122238, 0.26844483, 0.45513802, 0.16595416, 0.56314597, 0.87505639, 0.92836397, 0.80958641, 0.01591928, 0.0294, 0.42548396, 0.65489058, 0.77928102]]) X_new.shape from sklearn.neighbors import KNeighborsClassifier y = df['party'] X = df.drop(['party'], axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y) y_pred = knn.predict(X) new_prediction = knn.predict(X_new) from sklearn import datasets import matplotlib.pyplot as plt digits = datasets.load_digits() from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split X = digits.data y = digits.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y) knn = KNeighborsClassifier(n_neighbors=7) knn.fit(X_train, y_train) print(knn.score(X_test, y_test))
code
18149171/cell_9
[ "image_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier import numpy as np import pandas as pd df = pd.read_csv('../input/house-votes.csv', na_values=['?']) df.replace('^y$', value=1, regex=True, inplace=True) df.replace('^n$', value=0, regex=True, inplace=True) df.fillna(0, inplace=True) df.to_csv('house-votes-edited.csv') from sklearn.neighbors import KNeighborsClassifier y = df['party'].values X = df.drop('party', axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y) X_new = np.array([[0.44764519, 0.95034062, 0.43959532, 0.80122238, 0.26844483, 0.45513802, 0.16595416, 0.56314597, 0.87505639, 0.92836397, 0.80958641, 0.01591928, 0.0294, 0.42548396, 0.65489058, 0.77928102]]) X_new.shape from sklearn.neighbors import KNeighborsClassifier y = df['party'] X = df.drop(['party'], axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y) y_pred = knn.predict(X) new_prediction = knn.predict(X_new) print('Prediction: {}'.format(new_prediction))
code
18149171/cell_11
[ "text_html_output_1.png" ]
from sklearn import datasets import matplotlib.pyplot as plt from sklearn import datasets import matplotlib.pyplot as plt digits = datasets.load_digits() print(digits.keys()) print(digits['DESCR']) print(digits.images.shape) print(digits.data.shape) plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest') plt.show()
code
18149171/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier import pandas as pd df = pd.read_csv('../input/house-votes.csv', na_values=['?']) df.replace('^y$', value=1, regex=True, inplace=True) df.replace('^n$', value=0, regex=True, inplace=True) df.fillna(0, inplace=True) df.to_csv('house-votes-edited.csv') from sklearn.neighbors import KNeighborsClassifier y = df['party'].values X = df.drop('party', axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y)
code
18149171/cell_8
[ "text_plain_output_1.png" ]
import numpy as np X_new = np.array([[0.44764519, 0.95034062, 0.43959532, 0.80122238, 0.26844483, 0.45513802, 0.16595416, 0.56314597, 0.87505639, 0.92836397, 0.80958641, 0.01591928, 0.0294, 0.42548396, 0.65489058, 0.77928102]]) X_new.shape
code
18149171/cell_15
[ "text_plain_output_1.png" ]
from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('../input/house-votes.csv', na_values=['?']) df.replace('^y$', value=1, regex=True, inplace=True) df.replace('^n$', value=0, regex=True, inplace=True) df.fillna(0, inplace=True) df.to_csv('house-votes-edited.csv') from sklearn.neighbors import KNeighborsClassifier y = df['party'].values X = df.drop('party', axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y) X_new = np.array([[0.44764519, 0.95034062, 0.43959532, 0.80122238, 0.26844483, 0.45513802, 0.16595416, 0.56314597, 0.87505639, 0.92836397, 0.80958641, 0.01591928, 0.0294, 0.42548396, 0.65489058, 0.77928102]]) X_new.shape from sklearn.neighbors import KNeighborsClassifier y = df['party'] X = df.drop(['party'], axis=1).values knn = KNeighborsClassifier(n_neighbors=6) knn.fit(X, y) y_pred = knn.predict(X) new_prediction = knn.predict(X_new) from sklearn import datasets import matplotlib.pyplot as plt digits = datasets.load_digits() from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split X = digits.data y = digits.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y) knn = KNeighborsClassifier(n_neighbors=7) knn.fit(X_train, y_train) neighbors = np.arange(1, 9) train_accuracy = np.empty(len(neighbors)) test_accuracy = np.empty(len(neighbors)) for i, k in enumerate(neighbors): knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train, y_train) train_accuracy[i] = knn.score(X_train, y_train) test_accuracy[i] = knn.score(X_test, y_test) plt.title('k-NN: Varying Number of Neighbors') plt.plot(neighbors, test_accuracy, label='Testing Accuracy') plt.plot(neighbors, train_accuracy, label='Training Accuracy') plt.legend() plt.xlabel('Number of Neighbors') plt.ylabel('Accuracy') plt.show()
code
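The curves above pick k by eye; cross-validation makes the same sweep reproducible. A sketch assuming X and y are the digits arrays used in the cell:

from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

# mean 5-fold accuracy for each candidate k
scores = {k: cross_val_score(KNeighborsClassifier(n_neighbors=k), X, y, cv=5).mean()
          for k in range(1, 9)}
best_k = max(scores, key=scores.get)
print(best_k, scores[best_k])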
18149171/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/house-votes.csv', na_values=['?']) df.replace('^y$', value=1, regex=True, inplace=True) df.replace('^n$', value=0, regex=True, inplace=True) df.fillna(0, inplace=True) df.head()
code
50222580/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T (train_df.size, train_df.shape) train_df.isnull().any() train_df.isnull().sum() train_df.columns train_df.isnull().any()
code
50222580/cell_13
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T
code
50222580/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv') gender_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/gender_submission.csv') gender_df.describe().T
code
50222580/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T (train_df.size, train_df.shape) train_df.isnull().any() train_df.isnull().sum() train_df.columns train_df.isnull().any() train_df.isnull().any()
code
50222580/cell_11
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv') test_df.describe().T
code
50222580/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T (train_df.size, train_df.shape) train_df.isnull().any() train_df.isnull().sum() train_df.columns
code
50222580/cell_1
[ "text_plain_output_1.png" ]
import os import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
50222580/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T (train_df.size, train_df.shape) train_df.isnull().any() train_df.isnull().sum()
code
50222580/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv') gender_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/gender_submission.csv') gender_df.head()
code
50222580/cell_15
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv') test_df.describe().T (test_df.size, test_df.shape)
code
50222580/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv') gender_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/gender_submission.csv') gender_df.describe().T (gender_df.size, gender_df.shape)
code
50222580/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T (train_df.size, train_df.shape) train_df.isnull().any()
code
50222580/cell_14
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.describe().T (train_df.size, train_df.shape)
code
50222580/cell_10
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv') test_df.head()
code
50222580/cell_12
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv') train_df.head()
code
121154415/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left') data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left') data color = 'RdYlGn' data.info()
code
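Left merges like the ones above can silently multiply or orphan rows; pandas can assert the expected key cardinality and flag unmatched rows. A sketch on the same frames, assuming the protein table is unique per (visit, UniProt) key — validate raises if that assumption fails, which is the point:

# assumes df_proteins, df_peptides, df_cd as loaded above
data = df_proteins.merge(
    df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'],
    how='left', validate='one_to_many', indicator=True)
print(data['_merge'].value_counts())  # protein rows with no peptide show as left_only
data = data.drop(columns='_merge').merge(
    df_cd, on=['visit_id', 'visit_month', 'patient_id'],
    how='left', validate='many_to_one')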
121154415/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') df_peptides.head()
code
121154415/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') df_peptides.info()
code
121154415/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left') data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left') data ppp = pd.DataFrame(df_proteins.groupby('UniProt').patient_id.nunique()).rename(columns={'patient_id': 'count_patient'}).reset_index().sort_values('count_patient', ascending=False) ppp.tail(10)
code
121154415/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left') data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left') data color = 'RdYlGn' print('number of patients: ', data.patient_id.nunique()) print('number of proteins: ', data.UniProt.nunique())
code
121154415/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left') data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left') data color = 'RdYlGn' data[['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4', 'upd23b_clinical_state_on_medication']].isnull().sum()
code
121154415/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') print('df_proteins: ', df_proteins.columns) print('df_peptides: ', df_peptides.columns) print('df_cd: ', df_cd.columns)
code
121154415/cell_41
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left') data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left') data color = 'RdYlGn' ppp = pd.DataFrame(df_proteins.groupby('UniProt').patient_id.nunique()).rename(columns={'patient_id': 'count_patient'}).reset_index().sort_values('count_patient', ascending=False) data.loc[data.upd23b_clinical_state_on_medication == 'On', 'upd23b_clinical_state_on_medication'] = 1 data.loc[data.upd23b_clinical_state_on_medication == 'Off', 'upd23b_clinical_state_on_medication'] = 0 plt.figure(figsize=(8, 5)) color = 'RdYlGn' sns.heatmap(data.corr(), annot=True, linewidth=0.5, cmap=color)
code
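After the .loc assignments above, upd23b_clinical_state_on_medication is still object dtype, so DataFrame.corr quietly leaves it out of the heatmap. Mapping the raw strings and casting makes the column numeric; a sketch applied in place of the .loc assignments:

# maps the raw 'On'/'Off' column; anything unmapped (including NaN) stays NaN
data['upd23b_clinical_state_on_medication'] = (
    data['upd23b_clinical_state_on_medication']
        .map({'On': 1, 'Off': 0})
        .astype('float64'))
print(data['upd23b_clinical_state_on_medication'].dtype)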
121154415/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/' df_proteins = pd.read_csv(f'{datapath}train_proteins.csv') df_peptides = pd.read_csv(f'{datapath}train_peptides.csv') df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv') data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left') data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left') data plt.figure(figsize=(8, 5)) color = 'RdYlGn' sns.heatmap(data.corr(), annot=True, linewidth=0.5, cmap=color)
code