path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
105176386/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
def cantidad_goles(texto):
return texto.count('G') - texto.count('OG')
players['goles'] = players['Event'].fillna('').map(cantidad_goles)
players | code |
105176386/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
plt.hist(goles['antiguedad']) | code |
105176386/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105176386/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
cups | code |
105176386/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
players[players['Player Name'].str.contains('MESSI')] | code |
105176386/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
goles['anio_inicio'] = goles['Player Name'].map(lambda x: inicio[x])
goles | code |
105176386/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
goles['antiguedad'] = goles['Year'] - goles['anio_inicio']
goles | code |
105176386/cell_35 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
def cantidad_goles(texto):
return texto.count('G') - texto.count('OG')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
def min_goles(texto):
eventos = texto.split("' ")
goles = [e.replace("'", '') for e in eventos if e and e[0] == 'G']
return [int(g[1:]) for g in goles]
plt.xlim((0, 120))
plt.yticks([])
matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'})
matches.groupby(['Away Team Name', 'Year']).agg({'Away Team Goals': 'mean'})
home_goals = matches[['Home Team Name', 'Year', 'Home Team Goals']].rename(columns={'Home Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
away_goals = matches[['Away Team Name', 'Year', 'Away Team Goals']].rename(columns={'Away Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
goals = pd.concat([home_goals, away_goals], ignore_index=True)
goals.groupby(['Team Name', 'Year']).agg({'Goals': 'mean'})
top = list(goals['Team Name'].value_counts().nlargest(10).index)
goalsTop = goals[goals['Team Name'].isin(top)]
matriz = goalsTop.pivot_table(values='Goals', index='Team Name', columns='Year', aggfunc='mean').fillna(0)
matriz | code |
105176386/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
def cantidad_goles(texto):
return texto.count('G') - texto.count('OG')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
def min_goles(texto):
eventos = texto.split("' ")
goles = [e.replace("'", '') for e in eventos if e and e[0] == 'G']
return [int(g[1:]) for g in goles]
plt.xlim((0, 120))
plt.yticks([])
matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'})
matches.groupby(['Away Team Name', 'Year']).agg({'Away Team Goals': 'mean'})
home_goals = matches[['Home Team Name', 'Year', 'Home Team Goals']].rename(columns={'Home Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
away_goals = matches[['Away Team Name', 'Year', 'Away Team Goals']].rename(columns={'Away Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
goals = pd.concat([home_goals, away_goals], ignore_index=True)
goals.groupby(['Team Name', 'Year']).agg({'Goals': 'mean'})
sns.heatmap(goals.pivot_table(values='Goals', index='Team Name', columns='Year', aggfunc='mean').fillna(0)) | code |
105176386/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
def cantidad_goles(texto):
return texto.count('G') - texto.count('OG')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
def min_goles(texto):
eventos = texto.split("' ")
goles = [e.replace("'", '') for e in eventos if e and e[0] == 'G']
return [int(g[1:]) for g in goles]
plt.xlim((0, 120))
plt.yticks([])
plt.hist(players['Event'].fillna('').map(min_goles).sum(), color='#9FC131')
plt.ylabel('Probabilidad')
plt.xlabel('Minuto')
plt.title('Distribución de los minutos en los que ocurren goles') | code |
105176386/cell_37 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
def cantidad_goles(texto):
return texto.count('G') - texto.count('OG')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
def min_goles(texto):
eventos = texto.split("' ")
goles = [e.replace("'", '') for e in eventos if e and e[0] == 'G']
return [int(g[1:]) for g in goles]
plt.xlim((0, 120))
plt.yticks([])
matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'})
matches.groupby(['Away Team Name', 'Year']).agg({'Away Team Goals': 'mean'})
home_goals = matches[['Home Team Name', 'Year', 'Home Team Goals']].rename(columns={'Home Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
away_goals = matches[['Away Team Name', 'Year', 'Away Team Goals']].rename(columns={'Away Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
goals = pd.concat([home_goals, away_goals], ignore_index=True)
goals.groupby(['Team Name', 'Year']).agg({'Goals': 'mean'})
top = list(goals['Team Name'].value_counts().nlargest(10).index)
goalsTop = goals[goals['Team Name'].isin(top)]
matriz = goalsTop.pivot_table(values='Goals', index='Team Name', columns='Year', aggfunc='mean').fillna(0)
matriz
plt.figure(dpi=125)
sns.boxplot(data=goalsTop, x='Year', y='Goals')
plt.ylim((0, 7))
plt.xticks(rotation=90) | code |
105176386/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches | code |
105176386/cell_36 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
def cantidad_goles(texto):
return texto.count('G') - texto.count('OG')
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
def min_goles(texto):
eventos = texto.split("' ")
goles = [e.replace("'", '') for e in eventos if e and e[0] == 'G']
return [int(g[1:]) for g in goles]
plt.xlim((0, 120))
plt.yticks([])
matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'})
matches.groupby(['Away Team Name', 'Year']).agg({'Away Team Goals': 'mean'})
home_goals = matches[['Home Team Name', 'Year', 'Home Team Goals']].rename(columns={'Home Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
away_goals = matches[['Away Team Name', 'Year', 'Away Team Goals']].rename(columns={'Away Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
goals = pd.concat([home_goals, away_goals], ignore_index=True)
goals.groupby(['Team Name', 'Year']).agg({'Goals': 'mean'})
top = list(goals['Team Name'].value_counts().nlargest(10).index)
goalsTop = goals[goals['Team Name'].isin(top)]
matriz = goalsTop.pivot_table(values='Goals', index='Team Name', columns='Year', aggfunc='mean').fillna(0)
matriz
plt.figure(dpi=175)
sns.heatmap(matriz, square=True, cmap=sns.light_palette('seagreen', as_cmap=True), yticklabels=top)
plt.title('Goles promedio por selección y año')
plt.xlabel('Año')
plt.ylabel('Selección') | code |
88090632/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df10B = pd.read_excel('../input/trabajohoras2/TrabajoHoras.xlsx')
df10B.loc[df10B['Nombres'] == 'Victoria'] | code |
2000591/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
print('there are {} employees evaluated more than 0.7'.format(len(mydataset[mydataset['last_evaluation'] > 0.7])))
print('there are {} employees evaluated less than 0.7'.format(len(mydataset[mydataset['last_evaluation'] <= 0.7])))
print('there are {} employees satisfication level more than 0.6'.format(len(mydataset[mydataset['satisfaction_level'] > 0.6])))
print('there are {} employees satisfication level less than 0.6'.format(len(mydataset[mydataset['satisfaction_level'] <= 0.6])))
print('there are {} employees have project more than 3.80'.format(len(mydataset[mydataset['number_project'] > 3.8])))
print('there are {} employees have project less than 3.80'.format(len(mydataset[mydataset['number_project'] <= 3.8])))
print('there are {} employees that spend average monthly hours more than 201.050337'.format(len(mydataset[mydataset['average_montly_hours'] > 201.050337])))
print('there are {} employees that spend average monthly hours less than 201.050337'.format(len(mydataset[mydataset['average_montly_hours'] <= 201.050337]))) | code |
2000591/cell_23 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
sns.countplot(x='number_project', hue='left', data=mydataset)
plt.title('Number of People Doing project and still they left') | code |
2000591/cell_20 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
temp3 = pd.crosstab(mydataset['sales'], mydataset['salary'])
temp3.plot(kind='bar', stacked=True, color=['red', 'blue', 'Green'], grid=False) | code |
2000591/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape | code |
2000591/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
sns.factorplot('number_project', 'average_montly_hours', hue='left', data=mydataset) | code |
2000591/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10, 10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True, annot=True)
sns.plt.title('Correlation Matrix Heatmap') | code |
2000591/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
plt.xticks(rotation=90)
sns.countplot(x='sales', hue='left', data=mydataset)
plt.title('Number of people left from particular department')
plt.xticks(rotation=90) | code |
2000591/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.head() | code |
2000591/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
sns.countplot(x='sales', data=mydataset)
plt.title('Distribution of employess across departments')
plt.xticks(rotation=90) | code |
2000591/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
sns.factorplot('number_project', 'satisfaction_level', hue='left', data=mydataset)
plt.title('Number of project vs Satisfaction level based on that people leaving and staying') | code |
2000591/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.describe() | code |
2000591/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
sns.factorplot('sales', col='salary', col_wrap=3, data=mydataset, kind='count', size=15, aspect=0.4) | code |
2000591/cell_16 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
mydataset.hist(figsize=(15, 15))
plt.show() | code |
2000591/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib as matplot
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn import cross_validation
from IPython.display import display
from sklearn.metrics import classification_report, confusion_matrix | code |
2000591/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
print('number of people stay =' + str(len(stay)))
print('Number of people left=' + str(len(left))) | code |
2000591/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean()
left = mydataset[mydataset.left == 1]
stay = mydataset[mydataset.left == 0]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
sns.countplot(x='number_project', hue='salary', data=mydataset)
plt.title('Number of project Vs Salary') | code |
2000591/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean()
mydataset.groupby(['sales', 'left']).mean() | code |
2000591/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes | code |
2000591/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
mydataset = pd.read_csv('HR_comma_sep.csv')
mydataset.shape
mydataset.dtypes
fig = plt.figure(figsize=(10,10))
corr = mydataset.corr()
sns.heatmap(corr, vmax=1, square=True,annot=True)
sns.plt.title('Correlation Matrix Heatmap')
mydataset.mean() | code |
2000591/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
mydataset = pd.read_csv('HR_comma_sep.csv') | code |
128011233/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges | code |
128011233/cell_34 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
G = nx.Graph()
G.add_edges_from(edges.values)
node_degrees = {}
for node in G.nodes():
node_degrees[node] = len(list(G.neighbors(node)))
def freedman_diaconis(data):
IQR = np.percentile(data, 75) - np.percentile(data, 25)
bin_width = 2 * IQR / len(data) ** (1 / 3)
data_range = max(data) - min(data)
num_bins = int(np.ceil(data_range / bin_width))
return num_bins
degrees_dist = np.array(list(node_degrees.values()))
plt.xlim([0, 500])
degrees, degree_counts = np.unique(degrees_dist, return_counts=True)
probs = degree_counts / sum(degree_counts)
plt.loglog(degrees, probs, 'bo', markersize=2)
log_degrees = np.log10(degrees)
log_probs = np.log10(probs)
m1, b1 = np.polyfit(log_degrees, log_probs, 1)
y = m1 * log_degrees + b1
def C_v_i(v):
neighborhood = list(G.neighbors(v))
n_i = len(neighborhood)
if n_i < 2:
return 0
v_i = 0
for i in range(n_i):
for j in range(i + 1, n_i):
if G.has_edge(neighborhood[i], neighborhood[j]):
v_i += 1
max_n_i = n_i * (n_i - 1)
return 2 * (v_i / max_n_i)
coefficients = []
for node in G.nodes():
coefficients.append(C_v_i(node))
C_G = np.average(coefficients)
print('Clustering coefficient of a graph G is: ', C_G) | code |
128011233/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
nodes['education'].value_counts() | code |
128011233/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
G = nx.Graph()
G.add_edges_from(edges.values)
node_degrees = {}
for node in G.nodes():
node_degrees[node] = len(list(G.neighbors(node)))
def freedman_diaconis(data):
IQR = np.percentile(data, 75) - np.percentile(data, 25)
bin_width = 2 * IQR / len(data) ** (1 / 3)
data_range = max(data) - min(data)
num_bins = int(np.ceil(data_range / bin_width))
return num_bins
degrees_dist = np.array(list(node_degrees.values()))
plt.xlim([0, 500])
degrees, degree_counts = np.unique(degrees_dist, return_counts=True)
probs = degree_counts / sum(degree_counts)
plt.loglog(degrees, probs, 'bo', markersize=2)
log_degrees = np.log10(degrees)
log_probs = np.log10(probs)
m1, b1 = np.polyfit(log_degrees, log_probs, 1)
y = m1 * log_degrees + b1
plt.plot(10 ** log_degrees, 10 ** y, c='red')
plt.xlabel('Degree')
plt.ylabel('Probability')
plt.show()
print('Slope: ', -m1) | code |
128011233/cell_28 | [
"text_html_output_1.png"
] | import networkx as nx
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
G = nx.Graph()
G.add_edges_from(edges.values)
radius = nx.radius(G)
diameter = nx.diameter(G)
print('Radius: ', radius)
print('Diameter: ', diameter) | code |
128011233/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes | code |
128011233/cell_31 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
nodes = pd.read_csv('../input/tolokers/nodes.tsv', sep='\t', index_col='id')
nodes
edges = pd.read_csv('../input/tolokers/edges.tsv', sep='\t')
edges
G = nx.Graph()
G.add_edges_from(edges.values)
node_degrees = {}
for node in G.nodes():
node_degrees[node] = len(list(G.neighbors(node)))
def freedman_diaconis(data):
IQR = np.percentile(data, 75) - np.percentile(data, 25)
bin_width = 2 * IQR / len(data) ** (1 / 3)
data_range = max(data) - min(data)
num_bins = int(np.ceil(data_range / bin_width))
return num_bins
degrees_dist = np.array(list(node_degrees.values()))
plt.hist(degrees_dist, bins=freedman_diaconis(degrees_dist))
plt.xlim([0, 500])
plt.xlabel('Degree')
plt.ylabel('Frequency')
plt.show() | code |
121149218/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum() | code |
121149218/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.head(5) | code |
121149218/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape | code |
121149218/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv.describe() | code |
121149218/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['type'].value_counts().plot(kind='pie', autopct='%.2f') | code |
121149218/cell_33 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['duration'] = mv['duration'].str.replace(' min', '')
mv['country'].value_counts().head(10).plot(kind='bar')
plt.show() | code |
121149218/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['duration'] = mv['duration'].str.replace(' min', '')
mv.head(6) | code |
121149218/cell_39 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['duration'] = mv['duration'].str.replace(' min', '')
mv.groupby('listed_in')['title'].count().sort_values(ascending=False).reset_index()[:5]
len(mv[mv['release_year'] == 2021]) | code |
121149218/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['duration'] = mv['duration'].str.replace(' min', '')
mv.groupby('listed_in')['title'].count().sort_values(ascending=False).reset_index()[:5]
z = mv.groupby(['rating']).size().reset_index(name='count')
z | code |
121149218/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv | code |
121149218/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes | code |
121149218/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv | code |
121149218/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.tail(5) | code |
121149218/cell_3 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
121149218/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the catalogue; cell output is three random rows.
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3) | code |
121149218/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the catalogue; output is the five most common genre combinations.
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
# Strip ' min' from movie durations (still strings after this).
mv['duration'] = mv['duration'].str.replace(' min', '')
mv.groupby('listed_in')['title'].count().sort_values(ascending=False).reset_index()[:5] | code |
121149218/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the catalogue, then list the ten busiest directors.
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['duration'] = mv['duration'].str.replace(' min', '')
# NOTE(review): the [1:11] slice skips row 0, i.e. the single most frequent
# director (value_counts already drops NaN) -- confirm this is intentional.
a = mv['director'].value_counts().reset_index()[1:11]
a | code |
121149218/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the catalogue, then tabulate title counts per country.
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
# Passing the scalar 'counts' as the crosstab columns yields a single
# count column per country (the screenshot shows it rendered as such).
pd.crosstab(mv['country'], 'counts') | code |
121149218/cell_37 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the catalogue; output is genre counts for titles from India.
mv = pd.read_csv('/kaggle/input/netflix-shows/netflix_titles.csv')
mv.shape
mv.sample(3)
mv.dtypes
mv.isnull().sum()
mv['duration'] = mv['duration'].str.replace(' min', '')
mv.groupby('listed_in')['title'].count().sort_values(ascending=False).reset_index()[:5]
# NOTE: exact-match on country, so co-productions listing India among
# several countries are excluded.
mv[mv['country'] == 'India'].listed_in.value_counts() | code |
17123294/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
# Titanic: load train/test splits and summarise missing data per column
# (absolute count and percentage), showing the five worst columns.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
# isnull().count() counts all rows, so this is missing/total * 100.
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
missing_df.head(5) | code |
17123294/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
# Titanic: load the splits; cell output is the train frame's info() summary.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
train_df.info() | code |
17123294/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style='darkgrid')
# Titanic: age histograms by survival, split by sex, plus an Age-vs-Survived
# box plot. The Cabin recoding below is shared boilerplate across cells.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns
# Recode Cabin to a binary Known/Unknown feature on a copy of the frame.
temp_df = train_df.copy()
temp_df['Cabin'] = temp_df['Cabin'].fillna('Unknown')
occ_cabins = temp_df['Cabin'].copy()
occ_cabins[occ_cabins != 'Unknown'] = 'Known'
temp_df['Cabin'] = occ_cabins
females = train_df[train_df['Sex'] == 'female']
males = train_df[train_df['Sex'] == 'male']
# Side-by-side age distributions, survivors vs non-survivors, per sex.
# NOTE: sns.distplot is deprecated in seaborn >= 0.11 (use histplot).
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
ax = sns.distplot(males[males['Survived'] == 1]['Age'].dropna(), bins=10, kde=False, label='survived')
ax = sns.distplot(males[males['Survived'] == 0]['Age'].dropna(), bins=10, kde=False, label='not survived')
ax.legend()
ax.set_title('Male')
plt.subplot(1, 2, 2)
ax = sns.distplot(females[females['Survived'] == 1]['Age'].dropna(), kde=False, label='survived')
ax = sns.distplot(females[females['Survived'] == 0]['Age'].dropna(), kde=False, label='not survived')
ax.legend()
ax.set_title('Female')
plt.show()
plt.figure(figsize=(5.5, 5))
sns.boxplot(x='Survived', y='Age', data=train_df) | code |
17123294/cell_40 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style='darkgrid')
# Titanic: same age-by-sex histograms as the sibling cell, then a point plot
# of survival rate by passenger class.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns
# Recode Cabin to Known/Unknown on a copy (boilerplate; unused below).
temp_df = train_df.copy()
temp_df['Cabin'] = temp_df['Cabin'].fillna('Unknown')
occ_cabins = temp_df['Cabin'].copy()
occ_cabins[occ_cabins != 'Unknown'] = 'Known'
temp_df['Cabin'] = occ_cabins
females = train_df[train_df['Sex'] == 'female']
males = train_df[train_df['Sex'] == 'male']
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
ax = sns.distplot(males[males['Survived'] == 1]['Age'].dropna(), bins=10, kde=False, label='survived')
ax = sns.distplot(males[males['Survived'] == 0]['Age'].dropna(), bins=10, kde=False, label='not survived')
ax.legend()
ax.set_title('Male')
plt.subplot(1, 2, 2)
ax = sns.distplot(females[females['Survived'] == 1]['Age'].dropna(), kde=False, label='survived')
ax = sns.distplot(females[females['Survived'] == 0]['Age'].dropna(), kde=False, label='not survived')
ax.legend()
ax.set_title('Female')
plt.show()
# Survival rate per Pclass with confidence intervals.
plt.figure(figsize=(5.5, 5))
sns.pointplot(x='Pclass', y='Survived', data=train_df) | code |
17123294/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style='darkgrid')
# Titanic: survival rate by sex (bar plot) next to an age-by-survival
# violin plot split on sex.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns
# Recode Cabin to Known/Unknown on a copy (boilerplate; unused below).
temp_df = train_df.copy()
temp_df['Cabin'] = temp_df['Cabin'].fillna('Unknown')
occ_cabins = temp_df['Cabin'].copy()
occ_cabins[occ_cabins != 'Unknown'] = 'Known'
temp_df['Cabin'] = occ_cabins
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
sns.barplot(x='Sex', y='Survived', data=train_df)
plt.subplot(1, 2, 2)
sns.violinplot(x='Survived', y='Age', data=train_df, hue='Sex', split=True) | code |
17123294/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style='darkgrid')
# Titanic: survival rate for passengers with a known vs unknown cabin.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns
# Recode Cabin: NaN -> 'Unknown', everything else -> 'Known'.
temp_df = train_df.copy()
temp_df['Cabin'] = temp_df['Cabin'].fillna('Unknown')
occ_cabins = temp_df['Cabin'].copy()
occ_cabins[occ_cabins != 'Unknown'] = 'Known'
temp_df['Cabin'] = occ_cabins
plt.figure(figsize=(5.5, 5))
# Bar plot plus an overlaid point plot (black) on the same axes.
sns.barplot(x='Cabin', y='Survived', data=temp_df)
sns.pointplot(x='Cabin', y='Survived', data=temp_df, color='k') | code |
17123294/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
# Titanic: load the splits; cell output is the first rows of train.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
train_df.head() | code |
17123294/cell_32 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style='darkgrid')
# Titanic: age histograms by survival, split by sex (same plot as the
# sibling cells, without the trailing box/point plot).
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns
# Recode Cabin to Known/Unknown on a copy (boilerplate; unused below).
temp_df = train_df.copy()
temp_df['Cabin'] = temp_df['Cabin'].fillna('Unknown')
occ_cabins = temp_df['Cabin'].copy()
occ_cabins[occ_cabins != 'Unknown'] = 'Known'
temp_df['Cabin'] = occ_cabins
females = train_df[train_df['Sex'] == 'female']
males = train_df[train_df['Sex'] == 'male']
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
ax = sns.distplot(males[males['Survived'] == 1]['Age'].dropna(), bins=10, kde=False, label='survived')
ax = sns.distplot(males[males['Survived'] == 0]['Age'].dropna(), bins=10, kde=False, label='not survived')
ax.legend()
ax.set_title('Male')
plt.subplot(1, 2, 2)
ax = sns.distplot(females[females['Survived'] == 1]['Age'].dropna(), kde=False, label='survived')
ax = sns.distplot(females[females['Survived'] == 0]['Age'].dropna(), kde=False, label='not survived')
ax.legend()
ax.set_title('Female')
plt.show() | code |
17123294/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
# Titanic: load the splits; cell output is describe() for numeric columns.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
train_df.describe() | code |
17123294/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib
import numpy
import pandas
import seaborn
import sklearn
import sys
import sys
print('Python: {}'.format(sys.version))
import numpy
print('numpy: {}'.format(numpy.__version__))
import pandas
print('pandas: {}'.format(pandas.__version__))
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
import seaborn
print('seaborn: {}'.format(seaborn.__version__))
import sklearn
print('sklearn: {}'.format(sklearn.__version__)) | code |
17123294/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
# Titanic: missing-data summary boilerplate; cell output is the column index.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns | code |
17123294/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
# Titanic: load the splits; cell output is the train frame's shape.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape | code |
17123294/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(style='darkgrid')
# Titanic: age-by-sex survival histograms, then a Fare-vs-Survived box plot.
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.shape
missing_data = train_df.isnull().sum()
percent_missing = round(missing_data / train_df.isnull().count() * 100, 2)
missing_df = pd.concat([missing_data.sort_values(ascending=False), percent_missing.sort_values(ascending=False)], axis=1, keys=['Total', 'Percent'])
train_df.columns
# Recode Cabin to Known/Unknown on a copy (boilerplate; unused below).
temp_df = train_df.copy()
temp_df['Cabin'] = temp_df['Cabin'].fillna('Unknown')
occ_cabins = temp_df['Cabin'].copy()
occ_cabins[occ_cabins != 'Unknown'] = 'Known'
temp_df['Cabin'] = occ_cabins
females = train_df[train_df['Sex'] == 'female']
males = train_df[train_df['Sex'] == 'male']
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
ax = sns.distplot(males[males['Survived'] == 1]['Age'].dropna(), bins=10, kde=False, label='survived')
ax = sns.distplot(males[males['Survived'] == 0]['Age'].dropna(), bins=10, kde=False, label='not survived')
ax.legend()
ax.set_title('Male')
plt.subplot(1, 2, 2)
ax = sns.distplot(females[females['Survived'] == 1]['Age'].dropna(), kde=False, label='survived')
ax = sns.distplot(females[females['Survived'] == 0]['Age'].dropna(), kde=False, label='not survived')
ax.legend()
ax.set_title('Female')
plt.show()
# Fare distribution by survival outcome.
plt.figure(figsize=(5.5, 5))
sns.boxplot(x='Survived', y='Fare', data=train_df) | code |
1003448/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
# Ames house prices: load splits, stack the shared feature columns of train
# and test, and print skewness/kurtosis of the target.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
print('Skewness: %f' % train['SalePrice'].skew())
print('Kurtosis: %f' % train['SalePrice'].kurt()) | code |
1003448/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import xgboost as xgb
# Ames house prices: encode ordinal categoricals, drop two GrLivArea
# outliers, and fit an XGBoost regressor on the raw target.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
# Map quality/condition category labels to ordinal integers.
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
# NOTE(review): all_data still contains the two dropped rows, so slicing by
# the new train.shape[0] misaligns X rows with y -- confirm/rebuild all_data.
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y) | code |
1003448/cell_20 | [
"image_output_1.png"
] | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Ames house prices: OverallQual box plot, log-price comparison frame, and a
# GrLivArea-vs-SalePrice scatter plot.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
# Raw price next to log1p(price) to motivate the log transform.
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000)) | code |
1003448/cell_40 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y)
lasso = LassoCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
lasso = LassoCV(alphas=[alpha * 0.6, alpha * 0.65, alpha * 0.7, alpha * 0.75, alpha * 0.8, alpha * 0.85, alpha * 0.9, alpha * 0.95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
y_train_las = lasso.predict(X_train)
y_test_las = lasso.predict(X_test)
plt.hlines(y=0, xmin=10.5, xmax=13.5, color='red')
coefs = pd.Series(lasso.coef_, index=X_train.columns)
imp_coefs = pd.concat([coefs.sort_values().head(10), coefs.sort_values().tail(10)])
alphas = [0.07, 0.1, 0.3, 1, 6, 7, 13, 26, 52, 78, 104]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
xgb_preds = np.expm1(model_xgb.predict(X_test))
lasso_preds = np.expm1(model_lasso.predict(X_test))
predictions = pd.DataFrame({'xgb': xgb_preds, 'lasso': lasso_preds})
predictions.plot(x='xgb', y='lasso', kind='scatter') | code |
1003448/cell_29 | [
"application_vnd.jupyter.stderr_output_1.png"
# NOTE(review): 'model' is not defined in this cell; presumably it is a
# cross-validation result DataFrame (train/test RMSE per boosting round)
# produced in another cell -- confirm before running this in isolation.
] | model.loc[30:, ['test-rmse-mean', 'train-rmse-mean']].plot() | code |
1003448/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y)
xgb_preds = np.expm1(model_xgb.predict(X_test))
lasso_preds = np.expm1(model_lasso.predict(X_test)) | code |
1003448/cell_41 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y)
xgb_preds = np.expm1(model_xgb.predict(X_test))
lasso_preds = np.expm1(model_lasso.predict(X_test))
preds = 0.6 * lasso_preds + 0.4 * xgb_preds | code |
1003448/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
# Ames house prices: load the splits; cell output is describe() of the target.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'].describe() | code |
1003448/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range) | code |
1003448/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Ames house prices: twice-refined LassoCV fit, residual and prediction
# plots, and the top/bottom-10 lasso coefficients as a bar chart.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
# Map quality/condition category labels to ordinal integers.
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
# NOTE(review): 'df_train' is not defined in this cell -- presumably 'train'.
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
# Coarse alpha grid first, then a refined grid centred on the best alpha.
lasso = LassoCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
print('Best alpha :', alpha)
print('Try again for more precision with alphas centered around ' + str(alpha))
lasso = LassoCV(alphas=[alpha * 0.6, alpha * 0.65, alpha * 0.7, alpha * 0.75, alpha * 0.8, alpha * 0.85, alpha * 0.9, alpha * 0.95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
print('Best alpha :', alpha)
# NOTE(review): rmse_cv_train / rmse_cv_test are not defined in this cell
# (only an 'rmse_cv' exists in sibling cells) -- these lines would NameError.
print('Lasso RMSE on Training set :', rmse_cv_train(lasso).mean())
print('Lasso RMSE on Test set :', rmse_cv_test(lasso).mean())
y_train_las = lasso.predict(X_train)
y_test_las = lasso.predict(X_test)
# NOTE(review): y_train / y_test are never defined -- there is no train/test
# split here (only X_train, X_test, y); these residual plots cannot run as-is.
plt.scatter(y_train_las, y_train_las - y_train, c='blue', marker='s', label='Training data')
plt.scatter(y_test_las, y_test_las - y_test, c='lightgreen', marker='s', label='Validation data')
plt.title('Linear regression with Lasso regularization')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=10.5, xmax=13.5, color='red')
plt.show()
plt.scatter(y_train_las, y_train, c='blue', marker='s', label='Training data')
plt.scatter(y_test_las, y_test, c='lightgreen', marker='s', label='Validation data')
plt.title('Linear regression with Lasso regularization')
plt.xlabel('Predicted values')
plt.ylabel('Real values')
plt.legend(loc='upper left')
plt.plot([10.5, 13.5], [10.5, 13.5], c='red')
plt.show()
# Most positive and most negative lasso coefficients.
coefs = pd.Series(lasso.coef_, index=X_train.columns)
print('Lasso picked ' + str(sum(coefs != 0)) + ' features and eliminated the other ' + str(sum(coefs == 0)) + ' features')
imp_coefs = pd.concat([coefs.sort_values().head(10), coefs.sort_values().tail(10)])
imp_coefs.plot(kind='barh')
plt.title('Coefficients in the Lasso Model')
plt.show() | code |
1003448/cell_15 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
# Ames house prices: histogram/KDE of the raw SalePrice distribution.
# NOTE: sns.distplot is deprecated in seaborn >= 0.11 (use histplot/displot).
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
sns.distplot(train['SalePrice']) | code |
1003448/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Ames house prices: box plot of SalePrice per OverallQual level.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000) | code |
1003448/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Ames house prices: OverallQual box plot, then histograms of the raw price
# next to log1p(price) to motivate the log transform.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
prices.hist() | code |
1003448/cell_35 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
    """Return the per-fold RMSE of `model` under 5-fold cross-validation.

    Scores against the module-level feature matrix ``X_train`` and target
    ``y``; sklearn reports negated MSE, so the sign is flipped before the
    square root.
    """
    neg_mse = cross_val_score(model, X_train, y,
                              scoring='neg_mean_squared_error', cv=5)
    return np.sqrt(-neg_mse)
lasso = LassoCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
lasso = LassoCV(alphas=[alpha * 0.6, alpha * 0.65, alpha * 0.7, alpha * 0.75, alpha * 0.8, alpha * 0.85, alpha * 0.9, alpha * 0.95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
y_train_las = lasso.predict(X_train)
y_test_las = lasso.predict(X_test)
plt.hlines(y=0, xmin=10.5, xmax=13.5, color='red')
coefs = pd.Series(lasso.coef_, index=X_train.columns)
imp_coefs = pd.concat([coefs.sort_values().head(10), coefs.sort_values().tail(10)])
alphas = [0.07, 0.1, 0.3, 1, 6, 7, 13, 26, 52, 78, 104]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas] | code |
1003448/cell_14 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000)) | code |
1003448/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000)) | code |
1003448/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train.head() | code |
1003448/cell_37 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
    """Compute cross-validated RMSE for `model` (5 folds).

    Uses the module-level ``X_train`` / ``y``. The ``neg_mean_squared_error``
    scorer yields negative values, hence the negation inside the sqrt.
    """
    scores = cross_val_score(model, X_train, y,
                             scoring='neg_mean_squared_error', cv=5)
    return np.sqrt(-scores)
lasso = LassoCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
lasso = LassoCV(alphas=[alpha * 0.6, alpha * 0.65, alpha * 0.7, alpha * 0.75, alpha * 0.8, alpha * 0.85, alpha * 0.9, alpha * 0.95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
y_train_las = lasso.predict(X_train)
y_test_las = lasso.predict(X_test)
plt.hlines(y=0, xmin=10.5, xmax=13.5, color='red')
coefs = pd.Series(lasso.coef_, index=X_train.columns)
imp_coefs = pd.concat([coefs.sort_values().head(10), coefs.sort_values().tail(10)])
alphas = [0.07, 0.1, 0.3, 1, 6, 7, 13, 26, 52, 78, 104]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
cv_ridge.min() | code |
1003448/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'].describe() | code |
1003448/cell_36 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
all_data = all_data.replace({'Utilities': {'AllPub': 1, 'NoSeWa': 0, 'NoSewr': 0, 'ELO': 0}, 'Street': {'Pave': 1, 'Grvl': 0}, 'FireplaceQu': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoFireplace': 0}, 'Fence': {'GdPrv': 2, 'GdWo': 2, 'MnPrv': 1, 'MnWw': 1, 'NoFence': 0}, 'ExterQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'ExterCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'BsmtQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'BsmtExposure': {'Gd': 3, 'Av': 2, 'Mn': 1, 'No': 0, 'NoBsmt': 0}, 'BsmtCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoBsmt': 0}, 'GarageQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'GarageCond': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NoGarage': 0}, 'KitchenQual': {'Ex': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1}, 'Functional': {'Typ': 0, 'Min1': 1, 'Min2': 1, 'Mod': 2, 'Maj1': 3, 'Maj2': 4, 'Sev': 5, 'Sal': 6}})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
    """5-fold cross-validation RMSE of `model` on the module-level X_train/y.

    Returns an array with one RMSE value per fold.
    """
    fold_neg_mse = cross_val_score(model, X_train, y,
                                   scoring='neg_mean_squared_error', cv=5)
    return np.sqrt(-fold_neg_mse)
lasso = LassoCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
lasso = LassoCV(alphas=[alpha * 0.6, alpha * 0.65, alpha * 0.7, alpha * 0.75, alpha * 0.8, alpha * 0.85, alpha * 0.9, alpha * 0.95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15, alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4], max_iter=50000, cv=10)
lasso.fit(X_train, y)
alpha = lasso.alpha_
y_train_las = lasso.predict(X_train)
y_test_las = lasso.predict(X_test)
plt.hlines(y=0, xmin=10.5, xmax=13.5, color='red')
coefs = pd.Series(lasso.coef_, index=X_train.columns)
imp_coefs = pd.concat([coefs.sort_values().head(10), coefs.sort_values().tail(10)])
alphas = [0.07, 0.1, 0.3, 1, 6, 7, 13, 26, 52, 78, 104]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
cv_ridge.plot(title='Validation: Ridge model')
plt.xlabel('alpha')
plt.ylabel('rmse') | code |
2033577/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/fashion-mnist_train.csv')
train_df.head(5) | code |
2033577/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import keras
from keras.layers import *
from keras.models import *
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2033577/cell_7 | [
"text_plain_output_1.png"
] | import keras
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/fashion-mnist_train.csv')
batch_size = 128
num_classes = 10
epochs = 10
img_rows, img_cols = (28, 28)
y_train = keras.utils.to_categorical(train_df.label.values, num_classes)
x_train = np.array([row.reshape((img_rows, img_cols, 1)) for row in train_df.drop('label', axis=1, inplace=False).values])
def model(input_shape):
    """Build the 'Fashion_MNIST' CNN with the Keras functional API.

    Three conv -> batch-norm -> ReLU stages (the first two followed by max
    pooling, the third by dropout), then a flatten and two dense layers
    ending in a softmax over the module-level ``num_classes``.

    Args:
        input_shape: shape tuple of a single input image, e.g. (28, 28, 1).

    Returns:
        An uncompiled ``keras.models.Model``.
    """
    inp = Input(input_shape)

    # Stage 0: 5x5 conv, then BN/ReLU and a 2x2 stride-2 max pool.
    net = Conv2D(20, (5, 5), strides=(1, 1), name='conv0')(inp)
    net = BatchNormalization(axis=3, name='bn0')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2), name='max_pool0')(net)

    # Stage 1: 3x3 'same' conv, BN/ReLU, 3x3 pool with stride 2.
    net = Conv2D(25, (3, 3), strides=(1, 1), padding='same', name='conv1')(net)
    net = BatchNormalization(axis=3, name='bn1')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((3, 3), strides=(2, 2), name='max_pool1')(net)

    # Stage 2: 1x1 'same' conv, BN/ReLU, then dropout before the head.
    net = Conv2D(30, (1, 1), strides=(1, 1), padding='same', name='conv2')(net)
    net = BatchNormalization(axis=3, name='bn2')(net)
    net = Activation('relu')(net)
    net = Dropout(0.25)(net)

    # Classifier head.
    net = Flatten()(net)
    net = Dense(128, activation='relu', name='fc0')(net)
    net = Dropout(0.5)(net)
    net = Dense(num_classes, activation='softmax', name='fc1')(net)

    return Model(inputs=inp, outputs=net, name='Fashion_MNIST')
input_shape = (img_rows, img_cols, 1)
fashionmodel = model(input_shape)
fashionmodel.summary()
fashionmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = fashionmodel.fit(x_train, y_train, epochs=epochs, batch_size=batch_size) | code |
2033577/cell_8 | [
"text_plain_output_1.png"
] | import keras
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/fashion-mnist_train.csv')
batch_size = 128
num_classes = 10
epochs = 10
img_rows, img_cols = (28, 28)
y_train = keras.utils.to_categorical(train_df.label.values, num_classes)
x_train = np.array([row.reshape((img_rows, img_cols, 1)) for row in train_df.drop('label', axis=1, inplace=False).values])
def model(input_shape):
    """Construct the 'Fashion_MNIST' convolutional network (functional API).

    Architecture: conv0(5x5) -> bn -> ReLU -> maxpool, conv1(3x3, same) ->
    bn -> ReLU -> maxpool, conv2(1x1, same) -> bn -> ReLU -> dropout,
    then flatten -> dense(128, ReLU) -> dropout -> softmax over the
    module-level ``num_classes``.

    Args:
        input_shape: shape of one input sample, e.g. (28, 28, 1).

    Returns:
        An uncompiled ``Model`` instance.
    """
    inputs = Input(input_shape)

    # First convolutional stage.
    features = Conv2D(20, (5, 5), strides=(1, 1), name='conv0')(inputs)
    features = BatchNormalization(axis=3, name='bn0')(features)
    features = Activation('relu')(features)
    features = MaxPooling2D((2, 2), strides=(2, 2), name='max_pool0')(features)

    # Second convolutional stage.
    features = Conv2D(25, (3, 3), strides=(1, 1), padding='same', name='conv1')(features)
    features = BatchNormalization(axis=3, name='bn1')(features)
    features = Activation('relu')(features)
    features = MaxPooling2D((3, 3), strides=(2, 2), name='max_pool1')(features)

    # Third convolutional stage with dropout regularization.
    features = Conv2D(30, (1, 1), strides=(1, 1), padding='same', name='conv2')(features)
    features = BatchNormalization(axis=3, name='bn2')(features)
    features = Activation('relu')(features)
    features = Dropout(0.25)(features)

    # Fully connected classifier head.
    features = Flatten()(features)
    features = Dense(128, activation='relu', name='fc0')(features)
    features = Dropout(0.5)(features)
    features = Dense(num_classes, activation='softmax', name='fc1')(features)

    return Model(inputs=inputs, outputs=features, name='Fashion_MNIST')
input_shape = (img_rows, img_cols, 1)
fashionmodel = model(input_shape)
fashionmodel.summary()
fashionmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = fashionmodel.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)
print(history.history.keys())
plt.plot(history.history['acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train'], loc='upper left')
plt.show() | code |
2033577/cell_3 | [
"text_plain_output_1.png"
] | import keras
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/fashion-mnist_train.csv')
batch_size = 128
num_classes = 10
epochs = 10
img_rows, img_cols = (28, 28)
y_train = keras.utils.to_categorical(train_df.label.values, num_classes)
print('y_train: ', y_train.shape)
x_train = np.array([row.reshape((img_rows, img_cols, 1)) for row in train_df.drop('label', axis=1, inplace=False).values])
print('x_train: ', x_train.shape) | code |
2033577/cell_5 | [
"text_plain_output_1.png"
] | import keras
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/fashion-mnist_train.csv')
batch_size = 128
num_classes = 10
epochs = 10
img_rows, img_cols = (28, 28)
y_train = keras.utils.to_categorical(train_df.label.values, num_classes)
x_train = np.array([row.reshape((img_rows, img_cols, 1)) for row in train_df.drop('label', axis=1, inplace=False).values])
def model(input_shape):
    """Return the uncompiled 'Fashion_MNIST' CNN built via the functional API.

    Stacks three conv/batch-norm/ReLU blocks (pooling after the first two,
    dropout after the third) and finishes with dense(128) + dropout + a
    softmax layer sized by the module-level ``num_classes``.

    Args:
        input_shape: per-sample input shape, e.g. (28, 28, 1).
    """
    x_in = Input(input_shape)

    # Conv block 0: 5x5 kernel, BN, ReLU, 2x2 max pooling.
    h = Conv2D(20, (5, 5), strides=(1, 1), name='conv0')(x_in)
    h = BatchNormalization(axis=3, name='bn0')(h)
    h = Activation('relu')(h)
    h = MaxPooling2D((2, 2), strides=(2, 2), name='max_pool0')(h)

    # Conv block 1: 3x3 kernel with 'same' padding, BN, ReLU, pooling.
    h = Conv2D(25, (3, 3), strides=(1, 1), padding='same', name='conv1')(h)
    h = BatchNormalization(axis=3, name='bn1')(h)
    h = Activation('relu')(h)
    h = MaxPooling2D((3, 3), strides=(2, 2), name='max_pool1')(h)

    # Conv block 2: 1x1 kernel, BN, ReLU, dropout.
    h = Conv2D(30, (1, 1), strides=(1, 1), padding='same', name='conv2')(h)
    h = BatchNormalization(axis=3, name='bn2')(h)
    h = Activation('relu')(h)
    h = Dropout(0.25)(h)

    # Dense head with dropout and softmax output.
    h = Flatten()(h)
    h = Dense(128, activation='relu', name='fc0')(h)
    h = Dropout(0.5)(h)
    h = Dense(num_classes, activation='softmax', name='fc1')(h)

    return Model(inputs=x_in, outputs=h, name='Fashion_MNIST')
input_shape = (img_rows, img_cols, 1)
fashionmodel = model(input_shape)
fashionmodel.summary() | code |
121152202/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv')
df.drop(columns='id', inplace=True)
df_add = pd.read_csv('/kaggle/input/predict-concrete-strength/ConcreteStrengthData.csv')
df_add.rename(columns={'CementComponent ': 'CementComponent'}, inplace=True)
df = pd.concat([df, df_add])
df_test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv')
df_test.drop(columns='id', inplace=True)
y = df.pop('Strength')
df['tot_comp'] = df.iloc[:, :7].sum(axis=1)
df['ageinmonth'] = df.AgeInDays // 30 / 12
df['AgeInDays'] = df.AgeInDays / 365
df_test['tot_comp'] = df_test.iloc[:, :7].sum(axis=1)
df_test['ageinmonth'] = df_test.AgeInDays // 30 / 12
df_test['AgeInDays'] = df_test.AgeInDays / 365
df_transform = df.iloc[:, :7].transform(lambda x: x / df.tot_comp)
df_transform = pd.concat([df_transform, df.AgeInDays, df.ageinmonth], axis=1)
df_test_transform = df_test.iloc[:, :7].transform(lambda x: x / df_test.tot_comp)
df_test_transform = pd.concat([df_test_transform, df_test.AgeInDays, df_test.ageinmonth], axis=1)
subs = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv')
subs.head() | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.