path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
32067430/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
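# Walk the Kaggle input directory and print every data file available to the notebook.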
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32067430/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
avg_off = team_stats['ADJOE'].mean()
avg_def = team_stats['ADJDE'].mean()
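# Gap between the national-average adjusted defense and the champions' (lower ADJDE is better, so a positive result means the champions defend better than average).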
avg_def - team_stats[team_stats['POSTSEASON'] == 'Champions']['ADJDE'].mean() | code |
32067430/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
avg_off = team_stats['ADJOE'].mean()
avg_def = team_stats['ADJDE'].mean()
print(avg_off, avg_def, sep=',') | code |
32070671/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
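# Shade each bar by its height: larger counts shrink the green/blue components, so taller bars read as a deeper red.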
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.text(9.6, 120, 'average', fontsize=14)
def transf(x):
if x == 0:
return 1
else:
return x
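# weight_df divides each genre flag by the row's genre count, so every anime
# spreads a total weight of 1 across its genres (transf avoids division by zero
# for titles with no genre at all).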
def weight_df(df, col_start=12):
fact = df[df.columns[col_start:]].sum(axis=1).apply(lambda x: transf(x))
df_va = df.values
for m in range(len(df_va)):
for i in range(col_start, len(df_va[m])):
df_va[m][i] = df_va[m][i] / fact[m]
return pd.DataFrame(df_va, columns=df.columns)
lst = [['anime 1', 1, 1, 0, 1, 1, 0], ['anime 2', 0, 0, 0, 0, 0, 1], ['anime 3', 1, 0, 1, 1, 0, 0]]
cols = ['Anime', 'category_1', 'category_2', 'category_3', 'category_4', 'category_5', 'category_6']
example = pd.DataFrame(lst, columns=cols)
example
weight_df(example, col_start=1) | code |
32070671/cell_9 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
df = pd.read_csv('animes.csv')
df.describe() | code |
32070671/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.text(9.6, 120, 'average', fontsize=14)
def transf(x):
if x == 0:
return 1
else:
return x
def weight_df(df, col_start=12):
fact = df[df.columns[col_start:]].sum(axis=1).apply(lambda x: transf(x))
df_va = df.values
for m in range(len(df_va)):
for i in range(col_start, len(df_va[m])):
df_va[m][i] = df_va[m][i] / fact[m]
return pd.DataFrame(df_va, columns=df.columns)
lst = [['anime 1', 1, 1, 0, 1, 1, 0], ['anime 2', 0, 0, 0, 0, 0, 1], ['anime 3', 1, 0, 1, 1, 0, 0]]
cols = ['Anime', 'category_1', 'category_2', 'category_3', 'category_4', 'category_5', 'category_6']
example = pd.DataFrame(lst, columns=cols)
example
df_weighted = weight_df(df)
nb_0_genre = (df[df.columns[12:]].sum(axis=1) == 0).sum()
weighted_betw = df_weighted[df_weighted.columns[12:]].sum()
weighted_betw['NO genre'] = nb_0_genre
distrib_genre = 100 * weighted_betw / weighted_betw.sum()
distrib_genre = distrib_genre.sort_values(ascending=False)
plt.figure(figsize=(15, 10))
bar = sns.barplot(x=distrib_genre.index, y=distrib_genre.values)
plt.title('Distribution of genres', fontsize=18)
plt.ylabel('%', fontsize=18)
bar.tick_params(labelsize=16)
for item in bar.get_xticklabels():
item.set_rotation(90) | code |
32070671/cell_20 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.text(9.6, 120, 'average', fontsize=14)
def transf(x):
if x == 0:
return 1
else:
return x
def weight_df(df, col_start=12):
fact = df[df.columns[col_start:]].sum(axis=1).apply(lambda x: transf(x))
df_va = df.values
for m in range(len(df_va)):
for i in range(col_start, len(df_va[m])):
df_va[m][i] = df_va[m][i] / fact[m]
return pd.DataFrame(df_va, columns=df.columns)
lst = [['anime 1', 1, 1, 0, 1, 1, 0], ['anime 2', 0, 0, 0, 0, 0, 1], ['anime 3', 1, 0, 1, 1, 0, 0]]
cols = ['Anime', 'category_1', 'category_2', 'category_3', 'category_4', 'category_5', 'category_6']
example = pd.DataFrame(lst, columns=cols)
example | code |
32070671/cell_26 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.text(9.6, 120, 'average', fontsize=14)
def transf(x):
if x == 0:
return 1
else:
return x
def weight_df(df, col_start=12):
fact = df[df.columns[col_start:]].sum(axis=1).apply(lambda x: transf(x))
df_va = df.values
for m in range(len(df_va)):
for i in range(col_start, len(df_va[m])):
df_va[m][i] = df_va[m][i] / fact[m]
return pd.DataFrame(df_va, columns=df.columns)
lst = [['anime 1', 1, 1, 0, 1, 1, 0], ['anime 2', 0, 0, 0, 0, 0, 1], ['anime 3', 1, 0, 1, 1, 0, 0]]
cols = ['Anime', 'category_1', 'category_2', 'category_3', 'category_4', 'category_5', 'category_6']
example = pd.DataFrame(lst, columns=cols)
example
df_weighted = weight_df(df)
nb_0_genre = (df[df.columns[12:]].sum(axis=1) == 0).sum()
weighted_betw = df_weighted[df_weighted.columns[12:]].sum()
weighted_betw['NO genre'] = nb_0_genre
distrib_genre = 100 * weighted_betw / weighted_betw.sum()
distrib_genre = distrib_genre.sort_values(ascending=False)
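# Bucket the raw vote counts into coarse, human-readable bins for a categorical view.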
def create_bins(v):
if v > 10000:
return '>10000'
elif v > 2000:
return '2000-10000'
elif v > 500:
return '500-2000'
elif v > 100:
return '100-500'
elif v >= 10:
return '10-100'
else:
return '<10'
df['votes_cat'] = df['votes'].apply(create_bins) | code |
32070671/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx | code |
32070671/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
df = pd.read_csv('animes.csv') | code |
32070671/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.figure(figsize=(10, 6))
number_genre.plot.bar(color=colors)
plt.title('Repartition of the number of genres', fontsize=18)
plt.axhline(average, 0, 1, color='black', lw=3)
plt.text(9.6, 120, 'average', fontsize=14)
plt.ylabel('Animes count', fontsize=14)
plt.xlabel('\nNumber of genres', fontsize=14)
plt.show() | code |
32070671/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.text(9.6, 120, 'average', fontsize=14)
def transf(x):
if x == 0:
return 1
else:
return x
def weight_df(df, col_start=12):
fact = df[df.columns[col_start:]].sum(axis=1).apply(lambda x: transf(x))
df_va = df.values
for m in range(len(df_va)):
for i in range(col_start, len(df_va[m])):
df_va[m][i] = df_va[m][i] / fact[m]
return pd.DataFrame(df_va, columns=df.columns)
lst = [['anime 1', 1, 1, 0, 1, 1, 0], ['anime 2', 0, 0, 0, 0, 0, 1], ['anime 3', 1, 0, 1, 1, 0, 0]]
cols = ['Anime', 'category_1', 'category_2', 'category_3', 'category_4', 'category_5', 'category_6']
example = pd.DataFrame(lst, columns=cols)
example
df_weighted = weight_df(df)
nb_0_genre = (df[df.columns[12:]].sum(axis=1) == 0).sum()
weighted_betw = df_weighted[df_weighted.columns[12:]].sum()
weighted_betw['NO genre'] = nb_0_genre
distrib_genre = 100 * weighted_betw / weighted_betw.sum()
distrib_genre = distrib_genre.sort_values(ascending=False)
# Display the results
plt.figure(figsize=(15, 10))
bar = sns.barplot(x=distrib_genre.index, y=distrib_genre.values)
plt.title('Distribution of genres', fontsize=18)
plt.ylabel('%', fontsize=18)
bar.tick_params(labelsize=16)
# Rotate the x-labels
for item in bar.get_xticklabels():
    item.set_rotation(90)
mean_ratings = []
for g in df_weighted.columns[12:]:
rating = (df_weighted['rate'] * df_weighted[g]).sum() / df_weighted[g].sum()
mean_ratings.append([g, rating])
mean_ratings = pd.DataFrame(mean_ratings, columns=['Genre', 'Rating']).sort_values(by='Rating', ascending=False)
plt.figure(figsize=(15, 10))
bar = sns.barplot(x='Genre', y='Rating', data=mean_ratings, palette='coolwarm')
plt.title('Mean Rating for each Genre', fontsize=18)
plt.ylabel('Mean Rating', fontsize=18)
plt.xlabel('')
bar.tick_params(labelsize=16)
for item in bar.get_xticklabels():
item.set_rotation(90) | code |
32070671/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
number_genre = df[df.columns[12:]].sum(axis=1).value_counts().sort_index()
colors = []
maxg = max(number_genre)
for n in number_genre:
x = 0.8 - n / (2 * maxg)
colors.append((0.7, x, x))
average = number_genre.mean()
plt.text(9.6, 120, 'average', fontsize=14)
def transf(x):
if x == 0:
return 1
else:
return x
def weight_df(df, col_start=12):
fact = df[df.columns[col_start:]].sum(axis=1).apply(lambda x: transf(x))
df_va = df.values
for m in range(len(df_va)):
for i in range(col_start, len(df_va[m])):
df_va[m][i] = df_va[m][i] / fact[m]
return pd.DataFrame(df_va, columns=df.columns)
lst = [['anime 1', 1, 1, 0, 1, 1, 0], ['anime 2', 0, 0, 0, 0, 0, 1], ['anime 3', 1, 0, 1, 1, 0, 0]]
cols = ['Anime', 'category_1', 'category_2', 'category_3', 'category_4', 'category_5', 'category_6']
example = pd.DataFrame(lst, columns=cols)
example
df_weighted = weight_df(df)
nb_0_genre = (df[df.columns[12:]].sum(axis=1) == 0).sum()
weighted_betw = df_weighted[df_weighted.columns[12:]].sum()
weighted_betw['NO genre'] = nb_0_genre
distrib_genre = 100 * weighted_betw / weighted_betw.sum()
distrib_genre = distrib_genre.sort_values(ascending=False) | code |
32070671/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
df = pd.read_csv('animes.csv')
for c in df.columns[12:]:
df[c] = df[c].astype('int')
idx = []
for i in df.columns:
idx.append(i.replace('genre_', ''))
df.columns = idx
sns.heatmap(df.isnull())
plt.title('Missing values?', fontsize=18)
plt.show() | code |
16166543/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data_lokasi = pd.read_csv('../input/catatan_lokasi.csv')
profil_karyawan = pd.read_csv('../input/data_profil.csv')
profil_karyawan.head() | code |
16166543/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data_lokasi = pd.read_csv('../input/catatan_lokasi.csv')
profil_karyawan = pd.read_csv('../input/data_profil.csv')
data_lokasi.head() | code |
50226402/cell_13 | [
"text_plain_output_1.png"
] | def linearSearch(array, n, x):
for i in range(0, n):
if array[i] == x:
return i
return -1
array = [10, 20, 30, 40, 50, 60, 70]
x = 50
n = len(array)
result = linearSearch(array, n, x)
def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
element = 50
array = [10, 20, 30, 40, 50, 60, 70]
def selectionSort(array, size):
for step in range(size):
min_idx = step
for i in range(step + 1, size):
if array[i] < array[min_idx]:
min_idx = i
array[step], array[min_idx] = (array[min_idx], array[step])
data = [10, 5, 30, 15, 50, 6, 25]
size = len(data)
selectionSort(data, size)
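# Insertion sort: extend a sorted prefix by shifting larger elements right until each key drops into place.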
def insertionSort(array):
for step in range(1, len(array)):
key = array[step]
j = step - 1
while j >= 0 and key < array[j]:
array[j + 1] = array[j]
j = j - 1
array[j + 1] = key
data = [10, 5, 30, 15, 50, 6, 25]
insertionSort(data)
print('Sorted Array in Ascending Order:')
print(data) | code |
50226402/cell_9 | [
"text_plain_output_1.png"
] | def linearSearch(array, n, x):
for i in range(0, n):
if array[i] == x:
return i
return -1
array = [10, 20, 30, 40, 50, 60, 70]
x = 50
n = len(array)
result = linearSearch(array, n, x)
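# Recursive binary search over the inclusive index range [start, end]; returns -1 when the element is absent.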
def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
element = 50
array = [10, 20, 30, 40, 50, 60, 70]
print('Searching for {}'.format(element))
print('Index of {}: {}'.format(element, binary_search_recursive(array, element, 0, len(array) - 1))) | code |
50226402/cell_4 | [
"text_plain_output_1.png"
] | A = [1, 22, 30, 35, 300, 1000]
print(A.index(35)) | code |
50226402/cell_6 | [
"text_plain_output_1.png"
] | def linearSearch(array, n, x):
for i in range(0, n):
if array[i] == x:
return i
return -1
array = [10, 20, 30, 40, 50, 60, 70]
x = 50
n = len(array)
result = linearSearch(array, n, x)
if result == -1:
print('Element not found')
else:
print('Element found at index: ', result) | code |
50226402/cell_2 | [
"text_plain_output_1.png"
] | A = [1, 22, 30, 35, 300, 1000]
print(A) | code |
50226402/cell_11 | [
"text_plain_output_1.png"
] | def linearSearch(array, n, x):
for i in range(0, n):
if array[i] == x:
return i
return -1
array = [10, 20, 30, 40, 50, 60, 70]
x = 50
n = len(array)
result = linearSearch(array, n, x)
def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
element = 50
array = [10, 20, 30, 40, 50, 60, 70]
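# Selection sort: each pass swaps the smallest remaining element to the front of the unsorted region.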
def selectionSort(array, size):
for step in range(size):
min_idx = step
for i in range(step + 1, size):
if array[i] < array[min_idx]:
min_idx = i
array[step], array[min_idx] = (array[min_idx], array[step])
data = [10, 5, 30, 15, 50, 6, 25]
size = len(data)
selectionSort(data, size)
print('Sorted Array in Ascending Order:')
print(data) | code |
104115755/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
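# Shorten the verbose CAERS column names to snake_case identifiers used throughout the notebook.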
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df.duplicated('no_report').value_counts()
df.drop_duplicates('no_report', keep='last', inplace=True)
df.duplicated('no_report').value_counts()
plt.figure(figsize=(12, 15))
sns.set_style('ticks')
product = df['products_types'].sort_values(ascending=False)
sns.countplot(data=df, y=product)
plt.tight_layout()
plt.title('Reports by Industry\n', fontsize=20)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
plt.show()
df.products_types.value_counts().sort_values(ascending=False) | code |
104115755/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df.head(10) | code |
104115755/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns | code |
104115755/cell_25 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df['created_date'] = pd.to_datetime(df['created_date'], format='%m/%d/%Y')
df['start_date'] = pd.to_datetime(df['start_date'], format='%m/%d/%Y')
df.duplicated('no_report').value_counts()
df.drop_duplicates('no_report', keep='last', inplace=True)
df.duplicated('no_report').value_counts()
sns.set_style('ticks')
product = df['products_types'].sort_values(ascending=False)
plt.tight_layout()
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
df.products_types.value_counts().sort_values(ascending=False)
sns.set_style('ticks')
df['products_name'].value_counts()[1:40].sort_values(ascending=True).plot.barh()
plt.tight_layout()
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
df['products_name'].value_counts()[0:41].sort_values(ascending=False)
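# Flatten the comma-separated symptom strings into one list of title-cased symptom names, then count them.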
symptoms = []
for _, reactions in df['symptoms'].astype(object).str.split(',').items():
symptoms += [str(l).strip().title() for l in pd.Series(reactions).astype(object)]
outcome_df = pd.DataFrame({'Symptoms': pd.Series(symptoms).value_counts().index, 'Count': pd.Series(symptoms).value_counts()})[:100]
fig, ax = plt.subplots(figsize=(10, 23))
sns.barplot(x='Count', y='Symptoms', data=outcome_df).set_title('Health event counts by product type') | code |
104115755/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df.duplicated('no_report').value_counts()
df.drop_duplicates('no_report', keep='last', inplace=True)
df.duplicated('no_report').value_counts()
sns.set_style('ticks')
product = df['products_types'].sort_values(ascending=False)
plt.tight_layout()
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
df.products_types.value_counts().sort_values(ascending=False)
plt.figure(figsize=(12, 15))
sns.set_style('ticks')
df['products_name'].value_counts()[1:40].sort_values(ascending=True).plot.barh()
plt.tight_layout()
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
df['products_name'].value_counts()[0:41].sort_values(ascending=False) | code |
104115755/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape | code |
104115755/cell_26 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df['created_date'] = pd.to_datetime(df['created_date'], format='%m/%d/%Y')
df['start_date'] = pd.to_datetime(df['start_date'], format='%m/%d/%Y')
df.duplicated('no_report').value_counts()
df.drop_duplicates('no_report', keep='last', inplace=True)
df.duplicated('no_report').value_counts()
sns.set_style('ticks')
product = df['products_types'].sort_values(ascending=False)
plt.tight_layout()
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
df.products_types.value_counts().sort_values(ascending=False)
sns.set_style('ticks')
df['products_name'].value_counts()[1:40].sort_values(ascending=True).plot.barh()
plt.tight_layout()
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
df['products_name'].value_counts()[0:41].sort_values(ascending=False)
symptoms = []
for _, reactions in df['symptoms'].astype(object).str.split(',').items():
    symptoms += [str(l).strip().title() for l in pd.Series(reactions).astype(object)]
outcome_df = pd.DataFrame({'Symptoms': pd.Series(symptoms).value_counts().index, 'Count': pd.Series(symptoms).value_counts()})[:100]
fig, ax = plt.subplots(figsize=(10, 23))
sns.barplot(x='Count', y='Symptoms', data=outcome_df).set_title('Health event counts by product type')
plt.figure(figsize=(10, 10))
df['gender'].value_counts().plot.pie(autopct='%.2f', legend=True)
plt.tight_layout() | code |
104115755/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104115755/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns | code |
104115755/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df.duplicated('no_report').value_counts()
df.drop_duplicates('no_report', keep='last', inplace=True)
print('Duplicate Data')
df.duplicated('no_report').value_counts() | code |
104115755/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
print('Data is NULL:\n', df[df.columns[df.isnull().sum() != 0]].isnull().sum())
plt.figure(figsize=(8, 8))
sns.heatmap(df.isnull())
plt.show() | code |
104115755/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
print('Duplicate Data\n')
df.duplicated('no_report').value_counts() | code |
104115755/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.shape
df.columns
df.rename(columns={'PRI_Reported Brand/Product Name': 'products_name', 'SYM_One Row Coded Symptoms': 'symptoms', 'CI_Gender': 'gender', 'CI_Age at Adverse Event': 'age', 'CI_Age Unit': 'age_unit', 'RA_Report #': 'no_report', 'RA_CAERS Created Date': 'created_date', 'AEC_Event Start Date': 'start_date', 'PRI_Product Role': 'products_role', 'PRI_FDA Industry Code': 'industry_code', 'AEC_One Row Outcomes': 'Outcomes', 'PRI_FDA Industry Name': 'products_types'}, inplace=True)
df.columns
df.info() | code |
104115755/cell_5 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/adverse-food-events/CAERS_ASCII_2004_2017Q2.csv')
df.head() | code |
2045135/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
print(check_output(['ls', '../input']).decode('utf8')) | code |
2020652/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
2020652/cell_1 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd
import numpy as np
import xgboost as xgb
from scipy.optimize import minimize
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
import os
print(check_output(['ls', '../input']).decode('utf8')) | code |
2020652/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
target = train['target']
train.drop(['id', 'target'], axis=1, inplace=True)
train.shape | code |
34148902/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
sample_id = 50
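# Scale pixel intensities from [0, 255] down to [0, 1] before tensor conversion.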
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=2)
X_train = torch.from_numpy(X_train).type(torch.FloatTensor)
X_val = torch.from_numpy(X_val).type(torch.FloatTensor)
y_train = torch.from_numpy(y_train).type(torch.LongTensor)
y_val = torch.from_numpy(y_val).type(torch.LongTensor)
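# Wrap the tensors in TensorDatasets and serve them in mini-batches of 100.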
batch_size = 100
num_epochs = 20
train = torch.utils.data.TensorDataset(X_train, y_train)
val = torch.utils.data.TensorDataset(X_val, y_val)
train_loader = DataLoader(train, batch_size=batch_size, shuffle=False)
val_loader = DataLoader(val, batch_size=batch_size, shuffle=False)
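# Classic LeNet-5 layout: two 5x5 conv + max-pool stages (1 -> 6 -> 16 channels), then fully connected 400 -> 120 -> 84 -> 10.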
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class CNNModel(nn.Module):
def __init__(self):
super(CNNModel, self).__init__()
self.cnn1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=0)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.fc1 = nn.Linear(32 * 4 * 4, 10)
def forward(self, x):
out = self.cnn1(x)
out = self.relu1(out)
out = self.maxpool1(out)
out = self.cnn2(out)
out = self.relu2(out)
out = self.maxpool2(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
return out
class LeNet_dropout(nn.Module):
def __init__(self):
super(LeNet_dropout, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.dropout1 = nn.Dropout2d(p=0.2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.dropout2 = nn.Dropout2d(p=0.2)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.dropout1(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.dropout2(x)
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
model = LeNet()
if use_gpu:
model = model.cuda()
model = LeNet_dropout()
if use_gpu:
    model = model.cuda()
model = CNNModel()
if use_gpu:
    model = model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
count = 0
loss_list = []
val_loss_list = []
iteration_list = []
accuracy_list = []
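# Training loop: after every 50 mini-batches, measure loss and accuracy on the validation set.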
for epoch in range(num_epochs):
for images, labels in train_loader:
train_batch = Variable(images.view(batch_size, 1, 28, 28), requires_grad=True)
labels_batch = Variable(labels, requires_grad=False)
if use_gpu:
train_batch = train_batch.cuda()
labels_batch = labels_batch.cuda()
optimizer.zero_grad()
outputs = model(train_batch)
loss = criterion(outputs, labels_batch)
loss.backward()
optimizer.step()
count += 1
if count % 50 == 0:
correct = 0
total = 0
for images, labels in val_loader:
val_batch = Variable(images.view(-1, 1, 28, 28), requires_grad=False)
labels_batch = Variable(labels, requires_grad=False)
if use_gpu:
val_batch = val_batch.cuda()
labels_batch = labels_batch.cuda()
outputs = model(val_batch)
pred = torch.max(outputs.data, 1)[1]
val_loss = criterion(outputs, labels_batch)
total += len(labels)
correct += (pred == labels_batch).sum()
accuracy = correct / float(total) * 100
loss_list.append(loss.data)
val_loss_list.append(val_loss.data)
iteration_list.append(count)
accuracy_list.append(accuracy)
if count % 500 == 0:
print('Iteration: {}, Loss: {}, Accuracy: {}'.format(count, loss.data, accuracy)) | code |
34148902/cell_4 | [
"image_output_2.png",
"image_output_1.png"
] | import torch
use_gpu = torch.cuda.is_available()
use_gpu | code |
34148902/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
sample_id = 50
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=2)
X_train = torch.from_numpy(X_train).type(torch.FloatTensor)
X_val = torch.from_numpy(X_val).type(torch.FloatTensor)
y_train = torch.from_numpy(y_train).type(torch.LongTensor)
y_val = torch.from_numpy(y_val).type(torch.LongTensor)
batch_size = 100
num_epochs = 20
train = torch.utils.data.TensorDataset(X_train, y_train)
val = torch.utils.data.TensorDataset(X_val, y_val)
train_loader = DataLoader(train, batch_size=batch_size, shuffle=False)
val_loader = DataLoader(val, batch_size=batch_size, shuffle=False)
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class CNNModel(nn.Module):
def __init__(self):
super(CNNModel, self).__init__()
self.cnn1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=0)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.fc1 = nn.Linear(32 * 4 * 4, 10)
def forward(self, x):
out = self.cnn1(x)
out = self.relu1(out)
out = self.maxpool1(out)
out = self.cnn2(out)
out = self.relu2(out)
out = self.maxpool2(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
return out
class LeNet_dropout(nn.Module):
def __init__(self):
super(LeNet_dropout, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.dropout1 = nn.Dropout2d(p=0.2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.dropout2 = nn.Dropout2d(p=0.2)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.dropout1(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.dropout2(x)
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
model = LeNet()
if use_gpu:
model = model.cuda()
model = LeNet_dropout()
if use_gpu:
    model = model.cuda()
model = CNNModel()
if use_gpu:
    model = model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
count = 0
loss_list = []
val_loss_list = []
iteration_list = []
accuracy_list = []
for epoch in range(num_epochs):
for images, labels in train_loader:
train_batch = Variable(images.view(batch_size, 1, 28, 28), requires_grad=True)
labels_batch = Variable(labels, requires_grad=False)
if use_gpu:
train_batch = train_batch.cuda()
labels_batch = labels_batch.cuda()
optimizer.zero_grad()
outputs = model(train_batch)
loss = criterion(outputs, labels_batch)
loss.backward()
optimizer.step()
count += 1
if count % 50 == 0:
correct = 0
total = 0
for images, labels in val_loader:
val_batch = Variable(images.view(-1, 1, 28, 28), requires_grad=False)
labels_batch = Variable(labels, requires_grad=False)
if use_gpu:
val_batch = val_batch.cuda()
labels_batch = labels_batch.cuda()
outputs = model(val_batch)
pred = torch.max(outputs.data, 1)[1]
val_loss = criterion(outputs, labels_batch)
total += len(labels)
correct += (pred == labels_batch).sum()
accuracy = correct / float(total) * 100
loss_list.append(loss.data)
val_loss_list.append(val_loss.data)
iteration_list.append(count)
accuracy_list.append(accuracy)
plt.figure(figsize=(15, 8))
plt.plot(iteration_list, loss_list, label='training')
plt.plot(iteration_list, val_loss_list, label='validation')
plt.xlabel('Number of iteration')
plt.ylabel('Loss')
plt.title('CNN: Loss vs Number of iteration')
plt.legend()
plt.show()
plt.figure(figsize=(15, 8))
plt.plot(iteration_list, accuracy_list, color='red')
plt.xlabel('Number of iteration')
plt.ylabel('Accuracy')
plt.title('CNN: Accuracy vs Number of iteration')
plt.show() | code |
34148902/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
plt.figure(figsize=(12, 8))
sns.countplot(x=y_train)
y_train.unique() | code |
34148902/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
sample_id = 50
plt.imshow(X_train.loc[sample_id].values.reshape(28, 28))
plt.title(str(y_train[sample_id]))
plt.show() | code |
34148902/cell_3 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34148902/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import torch
import torch.nn.functional as F
use_gpu = torch.cuda.is_available()
use_gpu
df_train_path = '/kaggle/input/digit-recognizer/train.csv'
df_test_path = '/kaggle/input/digit-recognizer/test.csv'
X_train = pd.read_csv(df_train_path)
X_test = pd.read_csv(df_test_path)
y_train = X_train['label']
X_train = X_train.drop('label', axis=1)
sns.set_style('darkgrid')
y_train.unique()
sample_id = 50
X_train /= 255.0
X_test /= 255.0
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=2)
X_train = torch.from_numpy(X_train).type(torch.FloatTensor)
X_val = torch.from_numpy(X_val).type(torch.FloatTensor)
y_train = torch.from_numpy(y_train).type(torch.LongTensor)
y_val = torch.from_numpy(y_val).type(torch.LongTensor)
batch_size = 100
num_epochs = 20
train = torch.utils.data.TensorDataset(X_train, y_train)
val = torch.utils.data.TensorDataset(X_val, y_val)
train_loader = DataLoader(train, batch_size=batch_size, shuffle=False)
val_loader = DataLoader(val, batch_size=batch_size, shuffle=False)
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class CNNModel(nn.Module):
def __init__(self):
super(CNNModel, self).__init__()
self.cnn1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=0)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.fc1 = nn.Linear(32 * 4 * 4, 10)
def forward(self, x):
out = self.cnn1(x)
out = self.relu1(out)
out = self.maxpool1(out)
out = self.cnn2(out)
out = self.relu2(out)
out = self.maxpool2(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
return out
class LeNet_dropout(nn.Module):
def __init__(self):
super(LeNet_dropout, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5), padding=2)
self.dropout1 = nn.Dropout2d(p=0.2)
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.dropout2 = nn.Dropout2d(p=0.2)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.dropout1(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, (2, 2))
x = self.dropout2(x)
shape = x.size()[1:]
features = 1
for s in shape:
features *= s
x = x.view(-1, features)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
model = LeNet()
if use_gpu:
model = model.cuda()
model = LeNet_dropout()
if use_gpu:
    model = model.cuda()
model = CNNModel()
if use_gpu:
    model = model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
count = 0
loss_list = []
val_loss_list = []
iteration_list = []
accuracy_list = []
for epoch in range(num_epochs):
for images, labels in train_loader:
train_batch = Variable(images.view(batch_size, 1, 28, 28), requires_grad=True)
labels_batch = Variable(labels, requires_grad=False)
if use_gpu:
train_batch = train_batch.cuda()
labels_batch = labels_batch.cuda()
optimizer.zero_grad()
outputs = model(train_batch)
loss = criterion(outputs, labels_batch)
loss.backward()
optimizer.step()
count += 1
if count % 50 == 0:
correct = 0
total = 0
for images, labels in val_loader:
val_batch = Variable(images.view(-1, 1, 28, 28), requires_grad=False)
labels_batch = Variable(labels, requires_grad=False)
if use_gpu:
val_batch = val_batch.cuda()
labels_batch = labels_batch.cuda()
outputs = model(val_batch)
pred = torch.max(outputs.data, 1)[1]
val_loss = criterion(outputs, labels_batch)
total += len(labels)
correct += (pred == labels_batch).sum()
accuracy = correct / float(total) * 100
loss_list.append(loss.data)
val_loss_list.append(val_loss.data)
iteration_list.append(count)
accuracy_list.append(accuracy)
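# Run the trained model over every test image and build the two-column (ImageId, Label) submission array.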
n_test_samples = X_test.shape[0]
output_file = np.ndarray(shape=(n_test_samples, 2), dtype=int)
X_test = torch.from_numpy(X_test).type(torch.FloatTensor)
for test_idx in range(n_test_samples):
    test_sample = X_test[test_idx].view(1, 1, 28, 28)
    if use_gpu:
        test_sample = test_sample.cuda()
    pred = model(test_sample)
    _, pred = torch.max(pred, 1)
    output_file[test_idx][0] = test_idx + 1
    output_file[test_idx][1] = pred.item()
    if test_idx % 1000 == 0:
        print(f'testing sample #{test_idx}')
submission = pd.DataFrame(output_file, dtype=int, columns=['ImageId', 'Label']) | code |
323646/cell_13 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
act_train_char_10 = act_train[act_train['char_10'].notnull().values]
act_test_char_10 = act_test[act_test['char_10'].notnull().values]
drop_list = ['char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9']
act_train_char_10.drop(drop_list, axis=1, inplace=True)
act_test_char_10.drop(drop_list, axis=1, inplace=True)
act_train_char_X = act_train[act_train['char_10'].isnull().values]
act_test_char_X = act_test[act_test['char_10'].isnull().values]
drop_list = ['char_10']
act_train_char_X.drop(drop_list, axis=1, inplace=True)
act_test_char_X.drop(drop_list, axis=1, inplace=True)
people = pd.read_csv('../input/people.csv')
act_train_char_10 = pd.merge(act_train_char_10, people, on='people_id', how='left')
act_test_char_10 = pd.merge(act_test_char_10, people, on='people_id', how='left')
act_train_char_X = pd.merge(act_train_char_X, people, on='people_id', how='left')
act_test_char_X = pd.merge(act_test_char_X, people, on='people_id', how='left')
act_train_char_10.drop(['date_x', 'date_y'], axis=1, inplace=True)
act_test_char_10.drop(['date_x', 'date_y'], axis=1, inplace=True)
rename_dict = {'char_10_x': 'char_10_act', 'char_10_y': 'char_10_peo'}
act_train_char_10.rename(columns=rename_dict, inplace=True)
act_test_char_10.rename(columns=rename_dict, inplace=True)
act_train_char_X.drop(['date_x', 'date_y'], axis=1, inplace=True)
act_test_char_X.drop(['date_x', 'date_y'], axis=1, inplace=True)
rename_dict = {'char_1_x': 'char_1_act', 'char_2_x': 'char_2_act', 'char_3_x': 'char_3_act', 'char_4_x': 'char_4_act', 'char_5_x': 'char_5_act', 'char_6_x': 'char_6_act', 'char_7_x': 'char_7_act', 'char_8_x': 'char_8_act', 'char_9_x': 'char_9_act', 'char_1_y': 'char_1_peo', 'char_2_y': 'char_2_peo', 'char_3_y': 'char_3_peo', 'char_4_y': 'char_4_peo', 'char_5_y': 'char_5_peo', 'char_6_y': 'char_6_peo', 'char_7_y': 'char_7_peo', 'char_8_y': 'char_8_peo', 'char_9_y': 'char_9_peo'}
act_train_char_X.rename(columns=rename_dict, inplace=True)
act_test_char_X.rename(columns=rename_dict, inplace=True)
act_train_char_10 = act_train_char_10.astype(str)
act_test_char_10 = act_test_char_10.astype(str)
act_train_char_X = act_train_char_X.astype(str)
act_test_char_X = act_test_char_X.astype(str)
act_train_char_10 = act_train_char_10.replace(['True', 'False'], [1, 0])
act_test_char_10 = act_test_char_10.replace(['True', 'False'], [1, 0])
act_train_char_X = act_train_char_X.replace(['True', 'False'], [1, 0])
act_test_char_X = act_test_char_X.replace(['True', 'False'], [1, 0])
features_char_10 = list(act_train_char_10.columns.values)
features_char_X = list(act_train_char_X.columns.values)
features_char_10.remove('people_id')
features_char_10.remove('activity_id')
features_char_10.remove('outcome')
features_char_X.remove('people_id')
features_char_X.remove('activity_id')
features_char_X.remove('outcome')
label = ['outcome']
VALIDATION_SIZE = 10000
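# Hold out the first 10,000 rows for validation and fit on the remainder.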
dtc_char_10 = DecisionTreeClassifier()
dtc_char_10.fit(act_train_char_10[features_char_10].iloc[VALIDATION_SIZE:], act_train_char_10[label].iloc[VALIDATION_SIZE:])
print('Accuracy_Char_10 = ' + str(dtc_char_10.score(act_train_char_10[features_char_10].iloc[:VALIDATION_SIZE], act_train_char_10[label].iloc[:VALIDATION_SIZE])))
dct_char_X = LogisticRegression()
dct_char_X.fit(act_train_char_X[features_char_X].iloc[VALIDATION_SIZE:], act_train_char_X[label].iloc[VALIDATION_SIZE:])
print('Accuracy_Char_X = ' + str(dct_char_X.score(act_train_char_X[features_char_X].iloc[:VALIDATION_SIZE], act_train_char_X[label].iloc[:VALIDATION_SIZE]))) | code |
323646/cell_3 | [
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
act_train_char_10 = act_train[act_train['char_10'].notnull().values]
act_test_char_10 = act_test[act_test['char_10'].notnull().values]
drop_list = ['char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9']
act_train_char_10.drop(drop_list, axis=1, inplace=True)
act_test_char_10.drop(drop_list, axis=1, inplace=True)
act_train_char_X = act_train[act_train['char_10'].isnull().values]
act_test_char_X = act_test[act_test['char_10'].isnull().values]
drop_list = ['char_10']
act_train_char_X.drop(drop_list, axis=1, inplace=True)
act_test_char_X.drop(drop_list, axis=1, inplace=True) | code |
323646/cell_14 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import pandas as pd
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
act_train_char_10 = act_train[act_train['char_10'].notnull().values]
act_test_char_10 = act_test[act_test['char_10'].notnull().values]
drop_list = ['char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9']
act_train_char_10.drop(drop_list, axis=1, inplace=True)
act_test_char_10.drop(drop_list, axis=1, inplace=True)
act_train_char_X = act_train[act_train['char_10'].isnull().values]
act_test_char_X = act_test[act_test['char_10'].isnull().values]
drop_list = ['char_10']
act_train_char_X.drop(drop_list, axis=1, inplace=True)
act_test_char_X.drop(drop_list, axis=1, inplace=True)
people = pd.read_csv('../input/people.csv')
act_train_char_10 = pd.merge(act_train_char_10, people, on='people_id', how='left')
act_test_char_10 = pd.merge(act_test_char_10, people, on='people_id', how='left')
act_train_char_X = pd.merge(act_train_char_X, people, on='people_id', how='left')
act_test_char_X = pd.merge(act_test_char_X, people, on='people_id', how='left')
act_train_char_10.drop(['date_x', 'date_y'], axis=1, inplace=True)
act_test_char_10.drop(['date_x', 'date_y'], axis=1, inplace=True)
rename_dict = {'char_10_x': 'char_10_act', 'char_10_y': 'char_10_peo'}
act_train_char_10.rename(columns=rename_dict, inplace=True)
act_test_char_10.rename(columns=rename_dict, inplace=True)
act_train_char_X.drop(['date_x', 'date_y'], axis=1, inplace=True)
act_test_char_X.drop(['date_x', 'date_y'], axis=1, inplace=True)
rename_dict = {'char_1_x': 'char_1_act', 'char_2_x': 'char_2_act', 'char_3_x': 'char_3_act', 'char_4_x': 'char_4_act', 'char_5_x': 'char_5_act', 'char_6_x': 'char_6_act', 'char_7_x': 'char_7_act', 'char_8_x': 'char_8_act', 'char_9_x': 'char_9_act', 'char_1_y': 'char_1_peo', 'char_2_y': 'char_2_peo', 'char_3_y': 'char_3_peo', 'char_4_y': 'char_4_peo', 'char_5_y': 'char_5_peo', 'char_6_y': 'char_6_peo', 'char_7_y': 'char_7_peo', 'char_8_y': 'char_8_peo', 'char_9_y': 'char_9_peo'}
act_train_char_X.rename(columns=rename_dict, inplace=True)
act_test_char_X.rename(columns=rename_dict, inplace=True)
act_train_char_10 = act_train_char_10.astype(str)
act_test_char_10 = act_test_char_10.astype(str)
act_train_char_X = act_train_char_X.astype(str)
act_test_char_X = act_test_char_X.astype(str)
act_train_char_10 = act_train_char_10.replace(['True', 'False'], [1, 0])
act_test_char_10 = act_test_char_10.replace(['True', 'False'], [1, 0])
act_train_char_X = act_train_char_X.replace(['True', 'False'], [1, 0])
act_test_char_X = act_test_char_X.replace(['True', 'False'], [1, 0])
features_char_10 = list(act_train_char_10.columns.values)
features_char_X = list(act_train_char_X.columns.values)
features_char_10.remove('people_id')
features_char_10.remove('activity_id')
features_char_10.remove('outcome')
features_char_X.remove('people_id')
features_char_X.remove('activity_id')
features_char_X.remove('outcome')
label = ['outcome']
VALIDATION_SIZE = 10000
rfc_char_10 = RandomForestClassifier()
rfc_char_10.fit(act_train_char_10[features_char_10].iloc[VALIDATION_SIZE:], act_train_char_10[label].iloc[VALIDATION_SIZE:])
print('Accuracy_Char_10 = ' + str(rfc_char_10.score(act_train_char_10[features_char_10].iloc[:VALIDATION_SIZE], act_train_char_10[label].iloc[:VALIDATION_SIZE])))
rfc_char_X = LogisticRegression()
rfc_char_X.fit(act_train_char_X[features_char_X].iloc[VALIDATION_SIZE:], act_train_char_X[label].iloc[VALIDATION_SIZE:])
print('Accuracy_Char_X = ' + str(rfc_char_X.score(act_train_char_X[features_char_X].iloc[:VALIDATION_SIZE], act_train_char_X[label].iloc[:VALIDATION_SIZE]))) | code |
323646/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
act_train_char_10 = act_train[act_train['char_10'].notnull().values].copy()
act_test_char_10 = act_test[act_test['char_10'].notnull().values].copy()
drop_list = ['char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9']
act_train_char_10.drop(drop_list, axis=1, inplace=True)
act_test_char_10.drop(drop_list, axis=1, inplace=True)
act_train_char_X = act_train[act_train['char_10'].isnull().values].copy()
act_test_char_X = act_test[act_test['char_10'].isnull().values].copy()
drop_list = ['char_10']
act_train_char_X.drop(drop_list, axis=1, inplace=True)
act_test_char_X.drop(drop_list, axis=1, inplace=True)
people = pd.read_csv('../input/people.csv')
act_train_char_10 = pd.merge(act_train_char_10, people, on='people_id', how='left')
act_test_char_10 = pd.merge(act_test_char_10, people, on='people_id', how='left')
act_train_char_X = pd.merge(act_train_char_X, people, on='people_id', how='left')
act_test_char_X = pd.merge(act_test_char_X, people, on='people_id', how='left')
act_train_char_10.drop(['date_x', 'date_y'], axis=1, inplace=True)
act_test_char_10.drop(['date_x', 'date_y'], axis=1, inplace=True)
rename_dict = {'char_10_x': 'char_10_act', 'char_10_y': 'char_10_peo'}
act_train_char_10.rename(columns=rename_dict, inplace=True)
act_test_char_10.rename(columns=rename_dict, inplace=True)
act_train_char_X.drop(['date_x', 'date_y'], axis=1, inplace=True)
act_test_char_X.drop(['date_x', 'date_y'], axis=1, inplace=True)
rename_dict = {'char_1_x': 'char_1_act', 'char_2_x': 'char_2_act', 'char_3_x': 'char_3_act', 'char_4_x': 'char_4_act', 'char_5_x': 'char_5_act', 'char_6_x': 'char_6_act', 'char_7_x': 'char_7_act', 'char_8_x': 'char_8_act', 'char_9_x': 'char_9_act', 'char_1_y': 'char_1_peo', 'char_2_y': 'char_2_peo', 'char_3_y': 'char_3_peo', 'char_4_y': 'char_4_peo', 'char_5_y': 'char_5_peo', 'char_6_y': 'char_6_peo', 'char_7_y': 'char_7_peo', 'char_8_y': 'char_8_peo', 'char_9_y': 'char_9_peo'}
act_train_char_X.rename(columns=rename_dict, inplace=True)
act_test_char_X.rename(columns=rename_dict, inplace=True)
act_train_char_10 = act_train_char_10.astype(str)
act_test_char_10 = act_test_char_10.astype(str)
act_train_char_X = act_train_char_X.astype(str)
act_test_char_X = act_test_char_X.astype(str)
act_train_char_10 = act_train_char_10.replace(['True', 'False'], [1, 0])
act_test_char_10 = act_test_char_10.replace(['True', 'False'], [1, 0])
act_train_char_X = act_train_char_X.replace(['True', 'False'], [1, 0])
act_test_char_X = act_test_char_X.replace(['True', 'False'], [1, 0])
features_char_10 = list(act_train_char_10.columns.values)
features_char_X = list(act_train_char_X.columns.values)
features_char_10.remove('people_id')
features_char_10.remove('activity_id')
features_char_10.remove('outcome')
features_char_X.remove('people_id')
features_char_X.remove('activity_id')
features_char_X.remove('outcome')
label = ['outcome']
VALIDATION_SIZE = 10000
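# Train on rows beyond VALIDATION_SIZE; score on the first VALIDATION_SIZE rows.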
log_reg_char_10 = LogisticRegression()
log_reg_char_10.fit(act_train_char_10[features_char_10].iloc[VALIDATION_SIZE:], act_train_char_10[label].iloc[VALIDATION_SIZE:].values.ravel())
print('Accuracy_Char_10 = ' + str(log_reg_char_10.score(act_train_char_10[features_char_10].iloc[:VALIDATION_SIZE], act_train_char_10[label].iloc[:VALIDATION_SIZE])))
log_reg_char_X = LogisticRegression()
log_reg_char_X.fit(act_train_char_X[features_char_X].iloc[VALIDATION_SIZE:], act_train_char_X[label].iloc[VALIDATION_SIZE:].values.ravel())
print('Accuracy_Char_X = ' + str(log_reg_char_X.score(act_train_char_X[features_char_X].iloc[:VALIDATION_SIZE], act_train_char_X[label].iloc[:VALIDATION_SIZE]))) | code |
129024559/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape | code |
129024559/cell_25 | [
"text_html_output_1.png"
] | import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
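# Derive an Age column from the birth year, then drop the original column.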
df.Year_Birth = pd.to_datetime(df['Year_Birth'], format='%Y')
year_now = datetime.date.today().year
df['Age'] = df['Year_Birth'].apply(lambda x: year_now - x.year)
df.drop('Year_Birth', axis=1, inplace=True)
# Income by age (scatter) and the age distribution (boxplot)
fig,ax = plt.subplots(1,2,figsize=(18,4))
sns.scatterplot(x='Age',y='Income',data=df, hue='Response',ax=ax[0])
sns.boxplot(x='Age',data=df,ax=ax[1])
plt.show()
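# Collapse the six per-category spend columns into one total spending figure.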
df['spending'] = df.MntFishProducts + df.MntFruits + df.MntGoldProds + df.MntMeatProducts + df.MntSweetProducts + df.MntWines
df.drop(['MntFishProducts', 'MntFruits', 'MntGoldProds', 'MntMeatProducts', 'MntSweetProducts', 'MntWines'], axis=1, inplace=True)
fig, ax = plt.subplots(1, 2, figsize=(16, 4))
sns.scatterplot(x='Income', y='spending', data=df, hue='Response', ax=ax[0])
sns.histplot(df.spending, ax=ax[1])
plt.show() | code |
129024559/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import openpyxl
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
import lightgbm as lgb | code |
129024559/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
df.describe() | code |
129024559/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
df.head() | code |
129024559/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df | code |
129024559/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
df['Income'].value_counts() | code |
129024559/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
df['Education'].unique() | code |
129024559/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum() | code |
129024559/cell_22 | [
"text_plain_output_1.png"
] | import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
df.Year_Birth = pd.to_datetime(df['Year_Birth'], format='%Y')
year_now = datetime.date.today().year
df['Age'] = df['Year_Birth'].apply(lambda x: year_now - x.year)
df.drop('Year_Birth', axis=1, inplace=True)
fig, ax = plt.subplots(1, 2, figsize=(18, 4))
sns.scatterplot(x='Age', y='Income', data=df, hue='Response', ax=ax[0])
sns.boxplot(x='Age', data=df, ax=ax[1])
plt.show() | code |
129024559/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.info() | code |
129024559/cell_27 | [
"text_html_output_1.png"
] | import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum()
df.isnull().sum()
df.Year_Birth = pd.to_datetime(df['Year_Birth'], format='%Y')
year_now = datetime.date.today().year
df['Age'] = df['Year_Birth'].apply(lambda x: year_now - x.year)
df.drop('Year_Birth', axis=1, inplace=True)
# Income by age (scatter) and the age distribution (boxplot)
fig,ax = plt.subplots(1,2,figsize=(18,4))
sns.scatterplot(x='Age',y='Income',data=df, hue='Response',ax=ax[0])
sns.boxplot(x='Age',data=df,ax=ax[1])
plt.show()
df['spending'] = df.MntFishProducts + df.MntFruits + df.MntGoldProds + df.MntMeatProducts + df.MntSweetProducts + df.MntWines
df.drop(['MntFishProducts', 'MntFruits', 'MntGoldProds', 'MntMeatProducts', 'MntSweetProducts', 'MntWines'], axis=1, inplace=True)
# Income and spending
fig,ax = plt.subplots(1,2,figsize=(16,4))
sns.scatterplot(x='Income',y='spending',data=df,hue='Response',ax=ax[0])
sns.histplot(df.spending,ax=ax[1])
plt.show()
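# Compare income and total spending across education levels (bar and box plots).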
fig, ax = plt.subplots(2, 2, figsize=(14, 8))
sns.barplot(x='Education', y='Income', data=df, ax=ax[0, 0])
sns.boxplot(x='Education', y='Income', data=df, ax=ax[0, 1])
sns.barplot(x='Education', y='spending', data=df, ax=ax[1, 0])
sns.boxplot(x='Education', y='spending', data=df, ax=ax[1, 1])
plt.show() | code |
129024559/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_excel('/kaggle/input/arketing-campaign/marketing_campaign.xlsx')
df
df.shape
df.isnull().sum() | code |
33115588/cell_13 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
# Tune the polynomial degree
smallgrid = {'degree': [2,3,4,5,6,7,8,9,10]}
grid1 = GridSearchCV(cls,smallgrid)
grid1.fit(x_train,y_train)
# Plot the results
grid_result1 = pd.DataFrame(grid1.cv_results_)
plt = grid_result1.plot(x ='param_degree', y='mean_test_score')
plt = plt.set_ylabel("Accuracy")
param_grid = {'C': [0.1, 1, 10, 100], 'degree': [2, 3, 4, 5]}
grid = GridSearchCV(cls, param_grid)
grid.fit(x_train, y_train)
pvt = pd.pivot_table(pd.DataFrame(grid.cv_results_), values='mean_test_score', index='param_degree', columns='param_C')
smallgrid2 = {'C': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20], 'degree': [3]}
grid2 = GridSearchCV(cls, smallgrid2)
grid2.fit(x_train, y_train)
grid_result2 = pd.DataFrame(grid2.cv_results_)
plt = grid_result2.plot(x='param_C', y='mean_test_score')
plt = plt.set_ylabel('Accuracy') | code |
33115588/cell_9 | [
"text_plain_output_1.png"
] | from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
smallgrid = {'degree': [2, 3, 4, 5, 6, 7, 8, 9, 10]}
grid1 = GridSearchCV(cls, smallgrid)
grid1.fit(x_train, y_train)
grid_result1 = pd.DataFrame(grid1.cv_results_)
plt = grid_result1.plot(x='param_degree', y='mean_test_score')
plt = plt.set_ylabel('Accuracy') | code |
33115588/cell_4 | [
"image_output_1.png"
] | from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
table.head() | code |
33115588/cell_11 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
# Tune the polynomial degree
smallgrid = {'degree': [2,3,4,5,6,7,8,9,10]}
grid1 = GridSearchCV(cls,smallgrid)
grid1.fit(x_train,y_train)
# Plot the results
grid_result1 = pd.DataFrame(grid1.cv_results_)
plt = grid_result1.plot(x ='param_degree', y='mean_test_score')
plt = plt.set_ylabel("Accuracy")
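# Joint grid over C and degree, visualized as a heatmap below.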
param_grid = {'C': [0.1, 1, 10, 100], 'degree': [2, 3, 4, 5]}
grid = GridSearchCV(cls, param_grid)
grid.fit(x_train, y_train)
pvt = pd.pivot_table(pd.DataFrame(grid.cv_results_), values='mean_test_score', index='param_degree', columns='param_C')
sns.heatmap(pvt, annot=True) | code |
33115588/cell_16 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
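# Final model with tuned hyperparameters (C=4, degree=3, gamma=0.01).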
cls_final = svm.SVC(kernel='poly', C=4, degree=3, gamma=0.01)
cls_final.fit(x_train, y_train)
pred = cls_final.predict(x_test)
print('accuracy:', metrics.accuracy_score(y_test, y_pred=pred))
print(metrics.classification_report(y_test, y_pred=pred)) | code |
33115588/cell_3 | [
"image_output_1.png"
] | from sklearn.preprocessing import scale
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
table.head() | code |
33115588/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
# Tune the polynomial degree
smallgrid = {'degree': [2,3,4,5,6,7,8,9,10]}
grid1 = GridSearchCV(cls,smallgrid)
grid1.fit(x_train,y_train)
# Plot the results
grid_result1 = pd.DataFrame(grid1.cv_results_)
plt = grid_result1.plot(x ='param_degree', y='mean_test_score')
plt = plt.set_ylabel("Accuracy")
param_grid = {'C': [0.1, 1, 10, 100], 'degree': [2, 3, 4, 5]}
grid = GridSearchCV(cls, param_grid)
grid.fit(x_train, y_train)
pvt = pd.pivot_table(pd.DataFrame(grid.cv_results_), values='mean_test_score', index='param_degree', columns='param_C')
smallgrid2 = {'C': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20], 'degree': [3]}
grid2 = GridSearchCV(cls, smallgrid2)
grid2.fit(x_train, y_train)
# Plot the results
grid_result2 = pd.DataFrame(grid2.cv_results_)
plt = grid_result2.plot(x ='param_C', y='mean_test_score')
plt = plt.set_ylabel("Accuracy")
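# Refine gamma with C and degree fixed at their selected values.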
smallgrid3 = {'gamma': [0.001, 0.01, 0.1], 'C': [4], 'degree': [3]}
grid3 = GridSearchCV(cls, smallgrid3)
grid3.fit(x_train, y_train)
grid_result3 = pd.DataFrame(grid3.cv_results_)
plt3 = grid_result3.plot(x='param_gamma', y='mean_test_score') | code |
33115588/cell_10 | [
"text_html_output_1.png"
] | from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
param_grid = {'C': [0.1, 1, 10, 100], 'degree': [2, 3, 4, 5]}
grid = GridSearchCV(cls, param_grid)
grid.fit(x_train, y_train) | code |
33115588/cell_12 | [
"image_output_1.png"
] | from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import scale, LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
table = pd.read_csv('../input/genre-dataset/genre_dataset.txt')
table = table[table.genre.str.contains('jazz and blues') | table.genre.str.contains('soul and reggae')]
for i in range(4, len(table.columns)):
col = table.iloc[:, [i]].values
table.iloc[:, [i]] = scale(col)
le = LabelEncoder()
table['genre'] = le.fit_transform(table[['genre']])
X_train, X_test = train_test_split(table, test_size=0.2)
x_train = X_train.iloc[:, 4:].values
y_train = X_train.iloc[:, 0]
x_test = X_test.iloc[:, 4:].values
y_test = X_test.iloc[:, 0]
cls = svm.SVC(kernel='poly')
smallgrid2 = {'C': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20], 'degree': [3]}
grid2 = GridSearchCV(cls, smallgrid2)
grid2.fit(x_train, y_train) | code |
17121947/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.svm import LinearSVC
(X_train.shape, y_train.shape)
logRegModel = LogisticRegression()
logRegModel.fit(X_train, y_train)
svc = LinearSVC(random_state=43)
svc.fit(X_train, y_train)
gbClf = GradientBoostingClassifier()
gbClf.fit(X_train, y_train)
from sklearn.metrics import roc_auc_score
print('GradientBoost roc_auc score:', roc_auc_score(y_true=y_test, y_score=gbClf.predict(X_test)) * 100)
print('Logistic Regression roc_auc score:', roc_auc_score(y_true=y_test, y_score=logRegModel.predict(X_test)) * 100)
print('SVM roc_auc score:', roc_auc_score(y_true=y_test, y_score=svc.predict(X_test)) * 100) | code |
17121947/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
(X_train.shape, y_train.shape)
logRegModel = LogisticRegression()
logRegModel.fit(X_train, y_train) | code |
17121947/cell_25 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from xgboost import XGBClassifier, plot_importance
(X_train.shape, y_train.shape)
xgbClf = XGBClassifier(n_estimators=1000)
xgbClf.fit(X_train, y_train)
plot_importance(xgbClf) | code |
17121947/cell_20 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
(X_train.shape, y_train.shape)
gbClf = GradientBoostingClassifier()
gbClf.fit(X_train, y_train)
print('GradientBoost accuracy:', gbClf.score(X_test, y_test) * 100) | code |
17121947/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from xgboost import XGBClassifier, plot_importance
(X_train.shape, y_train.shape)
xgbClf = XGBClassifier(n_estimators=1000)
xgbClf.fit(X_train, y_train)
xgbClf.score(X_test, y_test) * 100 | code |
17121947/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | (X_train.shape, y_train.shape) | code |
17121947/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
(X_train.shape, y_train.shape)
gbClf = GradientBoostingClassifier()
gbClf.fit(X_train, y_train) | code |
17121947/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
import os
print(os.listdir('../input')) | code |
17121947/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
import os
scaler = StandardScaler()
sourceDataFile = os.listdir('../input')
creditDF = pd.read_csv('../input/' + sourceDataFile[0])
X = creditDF.iloc[:, 0:30]
y = creditDF.iloc[:, 30:31]
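# Standardize the 30 feature columns to zero mean and unit variance.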
scaledX = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
pd.plotting.scatter_matrix(scaledX.corr(), figsize=(30, 30)) | code |
17121947/cell_16 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.svm import LinearSVC
(X_train.shape, y_train.shape)
svc = LinearSVC(random_state=43)
svc.fit(X_train, y_train) | code |
17121947/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
(X_train.shape, y_train.shape)
logRegModel = LogisticRegression()
logRegModel.fit(X_train, y_train)
svc = LinearSVC(random_state=43)
svc.fit(X_train, y_train)
print('Logistic regression accuracy:', logRegModel.score(X_test, y_test) * 100)
print('SVC accuracy:', svc.score(X_test, y_test) * 100) | code |
17121947/cell_24 | [
"text_plain_output_1.png"
] | from xgboost import XGBClassifier, plot_importance
(X_train.shape, y_train.shape)
xgbClf = XGBClassifier(n_estimators=1000)
xgbClf.fit(X_train, y_train) | code |
121149216/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum() | code |
121149216/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.head(5) | code |
121149216/cell_9 | [
"text_html_output_2.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape | code |
121149216/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum()
viz_data = mo.copy(True)
viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False)
mo['brand_name'].value_counts() | code |
121149216/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum()
viz_data = mo.copy(True)
viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False) | code |
121149216/cell_33 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum()
viz_data = mo.copy(True)
viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False)
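# Per-model summary statistics of the number of sellers.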
pvt = mo.pivot_table(index='model_name', values='sellers_amount', aggfunc=['min', 'mean', 'max', 'sum', 'std', 'count'])
pvt
mo[(mo.screen_size < 6) & (mo.screen_size > 2)] | code |
121149216/cell_40 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum()
viz_data = mo.copy(True)
viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False)
pvt = mo.pivot_table(index='model_name', values='sellers_amount', aggfunc=['min', 'mean', 'max', 'sum', 'std', 'count'])
pvt
mo[(mo.screen_size < 6) & (mo.screen_size > 2)]
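# Count the listed models per brand and plot the totals as a bar chart.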
br_data = mo.groupby('brand_name').count()
br_data = br_data.sort_values('model_name', ascending=False)
px.bar(x=br_data.index, y=br_data.model_name, color_discrete_sequence=['red'], labels={'x': 'Brand', 'y': 'Phones amount'}) | code |
121149216/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum()
viz_data = mo.copy(True)
viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False)
mo['best_price'].mean() | code |
121149216/cell_11 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns | code |
121149216/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.info() | code |
121149216/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo | code |
121149216/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.tail(5) | code |
121149216/cell_3 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt, seaborn as sns, plotly.express as px, plotly.figure_factory as ff
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
121149216/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes | code |
121149216/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv')
mo.shape
mo.columns
mo.dtypes
mo.isnull().sum()
viz_data = mo.copy(True)
viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False)
pvt = mo.pivot_table(index='model_name', values='sellers_amount', aggfunc=['min', 'mean', 'max', 'sum', 'std', 'count'])
pvt
mo[(mo.screen_size < 6) & (mo.screen_size > 2)]
mo.describe() | code |