path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
89141106/cell_82 | [
"text_plain_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
grafico_pizza(dados, 'Incl. ST', ('#5735FD', '#3C78E8', '#2E90FF'), (0.05, 0.05, 0.05), 'Inclinação ST', 17) | code |
89141106/cell_51 | [
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
# Definir o gráfico de influência
def grafico_influencia(data_frame, coluna, bins, labels, com_bins=True):
# Armazenar os dados da coluna
influencia = data_frame.loc[:, [coluna, 'DCV']]
# Se os dados de "coluna" não forem como classe, então terá intervalos ("bins" e "labels")
if com_bins:
influencia[coluna] = pd.cut(influencia[coluna],
bins=bins,
labels=labels)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Determinar o tamanho da figura
plt.figure(figsize=(15, 5))
# Criar o gráfico
grafico = sns.pointplot(x=coluna, y='DCV', dodge=0.1, capsize=.1, data=influencia, color=cor)
# Colocar o título do gráfico
grafico.set_title(f'{coluna} influência', fontsize=25)
grafico_influencia(dados, 'Tipo de dor', None, None, False) | code |
89141106/cell_62 | [
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
grafico_pizza(dados, 'Glicemia', ('#140E36', '#091AAB'), (0.05, 0.05), 'Glicemia', 25) | code |
89141106/cell_59 | [
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
# Definir o gráfico de influência
def grafico_influencia(data_frame, coluna, bins, labels, com_bins=True):
# Armazenar os dados da coluna
influencia = data_frame.loc[:, [coluna, 'DCV']]
# Se os dados de "coluna" não forem como classe, então terá intervalos ("bins" e "labels")
if com_bins:
influencia[coluna] = pd.cut(influencia[coluna],
bins=bins,
labels=labels)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Determinar o tamanho da figura
plt.figure(figsize=(15, 5))
# Criar o gráfico
grafico = sns.pointplot(x=coluna, y='DCV', dodge=0.1, capsize=.1, data=influencia, color=cor)
# Colocar o título do gráfico
grafico.set_title(f'{coluna} influência', fontsize=25)
grafico_influencia(dados, 'Colesterol', [0, 150, 200, 250, 300, 350, 400, 1000], ['0-150', '150-200', '200-250', '250-300', '300-350', '350-400', '400+']) | code |
89141106/cell_58 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
grafico_distribuicao(dados, 'Colesterol', 'Colesterol') | code |
89141106/cell_78 | [
"text_plain_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
grafico_distribuicao(dados, 'Oldpeak', 'Oldpeak') | code |
89141106/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados | code |
89141106/cell_75 | [
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
# Definir o gráfico de influência
def grafico_influencia(data_frame, coluna, bins, labels, com_bins=True):
# Armazenar os dados da coluna
influencia = data_frame.loc[:, [coluna, 'DCV']]
# Se os dados de "coluna" não forem como classe, então terá intervalos ("bins" e "labels")
if com_bins:
influencia[coluna] = pd.cut(influencia[coluna],
bins=bins,
labels=labels)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Determinar o tamanho da figura
plt.figure(figsize=(15, 5))
# Criar o gráfico
grafico = sns.pointplot(x=coluna, y='DCV', dodge=0.1, capsize=.1, data=influencia, color=cor)
# Colocar o título do gráfico
grafico.set_title(f'{coluna} influência', fontsize=25)
grafico_influencia(dados, 'Dor por exec.', None, None, False) | code |
89141106/cell_66 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
grafico_pizza(dados, 'Eletro', ('#5735FD', '#3C78E8', '#2E90FF'), (0.05, 0.05, 0.05), 'Eletrocardiograma', 25) | code |
89141106/cell_93 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
colunas = ['Sexo', 'Tipo de dor', 'Glicemia', 'Eletro', 'Dor por exec.', 'Incl. ST', 'DCV']
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
# Definir o gráfico de influência
def grafico_influencia(data_frame, coluna, bins, labels, com_bins=True):
# Armazenar os dados da coluna
influencia = data_frame.loc[:, [coluna, 'DCV']]
# Se os dados de "coluna" não forem como classe, então terá intervalos ("bins" e "labels")
if com_bins:
influencia[coluna] = pd.cut(influencia[coluna],
bins=bins,
labels=labels)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Determinar o tamanho da figura
plt.figure(figsize=(15, 5))
# Criar o gráfico
grafico = sns.pointplot(x=coluna, y='DCV', dodge=0.1, capsize=.1, data=influencia, color=cor)
# Colocar o título do gráfico
grafico.set_title(f'{coluna} influência', fontsize=25)
colunas = [coluna for coluna in dados.columns if dados[coluna].dtype == 'object']
codificador = preprocessing.LabelEncoder()
for coluna in colunas:
dados[coluna] = codificador.fit_transform(dados[coluna])
plt.figure(figsize=(15, 10))
mascara = np.triu(dados.corr())
sns.heatmap(data=dados.corr(), cmap='Blues', mask=mascara, annot=True)
plt.show() | code |
89141106/cell_105 | [
"text_html_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
rfc = RandomForestClassifier(n_jobs=-1, n_estimators=500, max_depth=70, max_features=2, random_state=0)
knn = KNeighborsClassifier(n_neighbors=5, algorithm='kd_tree', weights='uniform', n_jobs=-1)
gbc = GradientBoostingClassifier(learning_rate=0.01, loss='exponential', max_depth=70, max_features=2, n_estimators=500, random_state=0)
rfc.fit(X_treino, y_treino) | code |
89141106/cell_27 | [
"image_output_1.png"
] | import pandas as pd
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
colunas = ['Sexo', 'Tipo de dor', 'Glicemia', 'Eletro', 'Dor por exec.', 'Incl. ST', 'DCV']
for coluna in colunas:
print(f'{coluna}: {dados[coluna].unique()}') | code |
89141106/cell_12 | [
"image_output_1.png"
] | import pandas as pd
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados | code |
89141106/cell_71 | [
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
# Definir o gráfico de influência
def grafico_influencia(data_frame, coluna, bins, labels, com_bins=True):
# Armazenar os dados da coluna
influencia = data_frame.loc[:, [coluna, 'DCV']]
# Se os dados de "coluna" não forem como classe, então terá intervalos ("bins" e "labels")
if com_bins:
influencia[coluna] = pd.cut(influencia[coluna],
bins=bins,
labels=labels)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Determinar o tamanho da figura
plt.figure(figsize=(15, 5))
# Criar o gráfico
grafico = sns.pointplot(x=coluna, y='DCV', dodge=0.1, capsize=.1, data=influencia, color=cor)
# Colocar o título do gráfico
grafico.set_title(f'{coluna} influência', fontsize=25)
grafico_influencia(dados, 'BPM', [0, 80, 100, 120, 140, 160, 180, 200, 1000], ['0-80', '80-100', '100-120', '120-140', '140-160', '160-180', '180-200', '200+']) | code |
89141106/cell_70 | [
"image_output_1.png"
] | from matplotlib import style
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
arquivo = '../input/heart-failure-prediction/heart.csv'
dados = pd.read_csv(arquivo)
dados
trocar_nomes = {'Age': 'Idade', 'Sex': 'Sexo', 'ChestPainType': 'Tipo de dor', 'RestingBP': 'Pressão', 'Cholesterol': 'Colesterol', 'FastingBS': 'Glicemia', 'RestingECG': 'Eletro', 'MaxHR': 'BPM', 'ExerciseAngina': 'Dor por exec.', 'ST_Slope': 'Incl. ST', 'HeartDisease': 'DCV'}
dados = dados.rename(columns=trocar_nomes)
dados
dados.describe().T
dados = dados[dados['Colesterol'] != 0]
dados = dados[dados['Pressão'] != 0]
dados.describe().T
sns.set_theme()
style.use('fivethirtyeight')
cores = ['lightcoral', 'deepskyblue', 'orchid', 'tomato', 'teal', 'darkcyan', 'limegreen', 'darkorange']
# Definir a função do gráfico de pizza
def grafico_pizza(data_frame, coluna, cores, explode, titulo, fonte):
# Fazer contagem dos valores da coluna selecionada
df = data_frame[coluna].value_counts()
# Determinar o tamannho da plotagem
plt.figure(figsize=(15, 10))
# Criar o gráfico de pizza
_, _, pacotes = plt.pie(df,
colors=cores,
labels=df.index,
explode=explode,
shadow=True,
startangle=90,
autopct='%1.1f%%',
textprops={'fontsize': fonte,
'color': 'black',
'weight': 'bold',
'family': 'serif'})
# Plotar o gráfico de pizza
plt.setp(pacotes, color='white')
# Colocar o título do gráfico
plt.title(titulo, size=45)
# Desenhar o círculo interno
circulo_centro = plt.Circle((0, 0), 0.40, fc='white')
fig = plt.gcf()
fig.gca().add_artist(circulo_centro)
# Definir o gráfico da função de distribuição
def grafico_distribuicao(data_frame, coluna, titulo):
# Armazenar os dados da coluna
dados = data_frame[coluna]
# Determinar a figura e seu tamanho
fig = plt.figure(figsize=(17, 7))
# Criar a grade em que os gráficos serão plotados
grade = GridSpec(nrows=2, ncols=1, figure=fig)
# Escolher uma das cores para o gráfico
cor = np.random.choice(cores, 1)[0]
# Motrar o valor de assimetria dos dados
print(f'Assimetria de {titulo}: {np.round(dados.skew(), 3)}')
# Plotar o histograma
ax0 = fig.add_subplot(grade[0, :])
ax0.set_title(f'Histograma e BoxPlot de {titulo}', y=1.05)
sns.histplot(data=dados, ax=ax0, color=cor)
# Plotar o BoxPlot
ax1 = fig.add_subplot(grade[1, :])
plt.axis('off')
sns.boxplot(x=dados, ax=ax1, color=cor)
grafico_distribuicao(dados, 'BPM', 'Batimento Cardíaco Máximo') | code |
18104935/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
X_con_col = list(X_con.columns)
#outlier detection and replacing them with mean
def outlier_detect(df):
for i in df.describe().columns:
Q1=df.describe().at['25%',i]
Q3=df.describe().at['75%',i]
IQR=Q3 - Q1
LTV=Q1 - 1.5 * IQR
UTV=Q3 + 1.5 * IQR
x=np.array(df[i])
p=[]
for j in x:
if j < LTV or j>UTV:
p.append(df[i].median())
else:
p.append(j)
df[i]=p
return df
X_con = outlier_detect(X_con)
X_con.isnull().sum() | code |
18104935/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
sns.heatmap(df.corr()) | code |
18104935/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
df['SalePrice'].isnull().sum() | code |
18104935/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
X_con_col = list(X_con.columns)
#outlier detection and replacing them with mean
def outlier_detect(df):
for i in df.describe().columns:
Q1=df.describe().at['25%',i]
Q3=df.describe().at['75%',i]
IQR=Q3 - Q1
LTV=Q1 - 1.5 * IQR
UTV=Q3 + 1.5 * IQR
x=np.array(df[i])
p=[]
for j in x:
if j < LTV or j>UTV:
p.append(df[i].median())
else:
p.append(j)
df[i]=p
return df
X_con = outlier_detect(X_con)
X_cat = X.select_dtypes(include='object')
X_cat_col = list(X_cat.columns)
X_cat.isnull().sum()
X_con.isnull().sum()
X_cat.drop(['PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
X_cat.drop('Alley', 1, inplace=True)
X_cat.isnull().sum()
for col in X_cat.columns:
X_cat[col] = X_cat[col].fillna(X_cat[col].mode()[0])
for cols in X_cat.columns:
sns.set(style="whitegrid")
ax = sns.barplot(x=cols, y="SalePrice", data=df)
plt.show()
X_con.isnull().sum()
for col in X_con.columns:
X_con[col] = X_con[col].replace(to_replace=np.nan, value=0)
for cols in X_con.columns:
sns.set()
sns.distplot(X_con[cols])
plt.show() | code |
18104935/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
X_cat = X.select_dtypes(include='object')
X_cat_col = list(X_cat.columns)
X_cat.isnull().sum() | code |
18104935/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/train.csv'
df = pd.read_csv(path)
df.head() | code |
18104935/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
#outlier detection and replacing them with mean
def outlier_detect(df):
for i in df.describe().columns:
Q1=df.describe().at['25%',i]
Q3=df.describe().at['75%',i]
IQR=Q3 - Q1
LTV=Q1 - 1.5 * IQR
UTV=Q3 + 1.5 * IQR
x=np.array(df[i])
p=[]
for j in x:
if j < LTV or j>UTV:
p.append(df[i].median())
else:
p.append(j)
df[i]=p
return df
X_cat = X.select_dtypes(include='object')
X_cat_col = list(X_cat.columns)
X_cat.isnull().sum()
X_cat.drop(['PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
X_cat.drop('Alley', 1, inplace=True)
X_cat.isnull().sum()
for col in X_cat.columns:
X_cat[col] = X_cat[col].fillna(X_cat[col].mode()[0])
for cols in X_cat.columns:
sns.set(style='whitegrid')
ax = sns.barplot(x=cols, y='SalePrice', data=df)
plt.show() | code |
18104935/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
sns.pairplot(df[cols], size=2.5)
plt.show() | code |
18104935/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18104935/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns | code |
18104935/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
X_con_col = list(X_con.columns)
#outlier detection and replacing them with mean
def outlier_detect(df):
for i in df.describe().columns:
Q1=df.describe().at['25%',i]
Q3=df.describe().at['75%',i]
IQR=Q3 - Q1
LTV=Q1 - 1.5 * IQR
UTV=Q3 + 1.5 * IQR
x=np.array(df[i])
p=[]
for j in x:
if j < LTV or j>UTV:
p.append(df[i].median())
else:
p.append(j)
df[i]=p
return df
X_con = outlier_detect(X_con)
X_con.isnull().sum()
X_con.isnull().sum() | code |
18104935/cell_8 | [
"image_output_11.png",
"image_output_24.png",
"image_output_25.png",
"image_output_17.png",
"image_output_30.png",
"image_output_14.png",
"image_output_28.png",
"image_output_23.png",
"image_output_34.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_21.png",
"image_output_7.png",
"image_output_31.png",
"image_output_20.png",
"image_output_32.png",
"image_output_4.png",
"image_output_35.png",
"image_output_36.png",
"image_output_8.png",
"image_output_37.png",
"image_output_16.png",
"image_output_27.png",
"image_output_6.png",
"image_output_12.png",
"image_output_22.png",
"image_output_3.png",
"image_output_29.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_33.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png",
"image_output_26.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)
df.plot.scatter(x='LotFrontage', y='SalePrice', ax=ax1)
df.plot.scatter(x='LotArea', y='SalePrice', ax=ax2)
df.plot.scatter(x='MSSubClass', y='SalePrice', ax=ax3)
df.plot.scatter(x='OverallQual', y='SalePrice', ax=ax4)
plt.show() | code |
18104935/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
X_cat = X.select_dtypes(include='object')
X_cat_col = list(X_cat.columns)
X_cat.isnull().sum()
X_cat.drop(['PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
X_cat.drop('Alley', 1, inplace=True)
X_cat.isnull().sum() | code |
18104935/cell_27 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
path = '../input/train.csv'
df = pd.read_csv(path)
df.columns
fig = plt.figure(2)
ax1 = fig.add_subplot(2, 2, 1)
ax2=fig.add_subplot(2,2,2)
ax3=fig.add_subplot(2,2,3)
ax4=fig.add_subplot(2,2,4)
df.plot.scatter(x='LotFrontage',y='SalePrice',ax=ax1)
df.plot.scatter(x='LotArea',y='SalePrice',ax=ax2)
df.plot.scatter(x='MSSubClass',y='SalePrice',ax=ax3)
df.plot.scatter(x='OverallQual',y='SalePrice',ax=ax4)
plt.show()
sns.set()
cols = list(df.columns)
X = df.drop('SalePrice', axis=1)
X_con = X.select_dtypes(exclude='object')
#outlier detection and replacing them with mean
def outlier_detect(df):
for i in df.describe().columns:
Q1=df.describe().at['25%',i]
Q3=df.describe().at['75%',i]
IQR=Q3 - Q1
LTV=Q1 - 1.5 * IQR
UTV=Q3 + 1.5 * IQR
x=np.array(df[i])
p=[]
for j in x:
if j < LTV or j>UTV:
p.append(df[i].median())
else:
p.append(j)
df[i]=p
return df
X_cat = X.select_dtypes(include='object')
X_cat_col = list(X_cat.columns)
X_cat.isnull().sum()
X_cat.drop(['PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
X_cat.drop('Alley', 1, inplace=True)
X_cat.isnull().sum()
for col in X_cat.columns:
X_cat[col] = X_cat[col].fillna(X_cat[col].mode()[0])
for cols in X_cat.columns:
sns.set(style="whitegrid")
ax = sns.barplot(x=cols, y="SalePrice", data=df)
plt.show()
X_cat.isnull().sum() | code |
130009993/cell_2 | [
"text_plain_output_1.png"
] | !pip install pandasai | code |
130009993/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.express as px
import math
import seaborn as sns
from pandas_profiling import ProfileReport
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130009993/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pandasai import PandasAI
from pandasai.llm.openai import OpenAI | code |
34119091/cell_42 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
sqrt_mileage.skew()
cube_root_mileage = np.cbrt(data['mileage'])
cube_root_mileage
cube_root_mileage.skew()
sns.distplot(cube_root_mileage, hist=True) | code |
34119091/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.info() | code |
34119091/cell_34 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage | code |
34119091/cell_30 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
log_mileage.skew() | code |
34119091/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
sns.distplot(data['mileage'], hist=True) | code |
34119091/cell_40 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
cube_root_mileage = np.cbrt(data['mileage'])
cube_root_mileage | code |
34119091/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage | code |
34119091/cell_41 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
cube_root_mileage = np.cbrt(data['mileage'])
cube_root_mileage
cube_root_mileage.skew() | code |
34119091/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
data['mileage'].skew() | code |
34119091/cell_7 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34119091/cell_45 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
cube_root_mileage = np.cbrt(data['mileage'])
cube_root_mileage
recipr_mileage = np.reciprocal(data['mileage'])
recipr_mileage | code |
34119091/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
import seaborn as sns
data['price'].hist(grid=False) | code |
34119091/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
data['price'].skew() | code |
34119091/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
sns.distplot(data['price'], hist=True) | code |
34119091/cell_35 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
sqrt_mileage.skew() | code |
34119091/cell_46 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
cube_root_mileage = np.cbrt(data['mileage'])
cube_root_mileage
recipr_mileage = np.reciprocal(data['mileage'])
recipr_mileage
recipr_mileage.skew() | code |
34119091/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape | code |
34119091/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data.head(2) | code |
34119091/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.head(2) | code |
34119091/cell_36 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv')
data = data.drop(['Unnamed: 0'], axis=1)
data.shape
log_mileage = np.log(data['mileage'])
log_mileage
sqrt_mileage = np.sqrt(data['mileage'])
sqrt_mileage
sqrt_mileage.skew()
sns.distplot(sqrt_mileage, hist=True) | code |
73082288/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
target_mean = train_df['target'].mean()
target_median = train_df['target'].median()
target_std = train_df['target'].std()
target_min = train_df['target'].min()
target_25 = np.percentile(train_df['target'], 25)
target_50 = np.percentile(train_df['target'], 50)
target_75 = np.percentile(train_df['target'], 75)
target_max = train_df['target'].max()
target_skew = train_df['target'].skew(axis=0, skipna=True)
print('standard_error Variable')
print('----------')
standard_error_mean = train_df['standard_error'].mean()
print(f'Mean: {standard_error_mean}')
standard_error_median = train_df['standard_error'].median()
print(f'Median: {standard_error_median}')
standard_error_std = train_df['standard_error'].std()
print(f'Standard Deviation: {standard_error_std}')
standard_error_min = train_df['standard_error'].min()
print(f'Minimum Value: {standard_error_min}')
standard_error_25 = np.percentile(train_df['standard_error'], 25)
print(f'25th Percentile: {standard_error_25}')
standard_error_50 = np.percentile(train_df['standard_error'], 50)
print(f'50th Percentile: {standard_error_50}')
standard_error_75 = np.percentile(train_df['standard_error'], 75)
print(f'75th Percentile: {standard_error_75}')
standard_error_max = train_df['standard_error'].max()
print(f'Maximum Value: {standard_error_max}')
standard_error_skew = train_df['target'].skew(axis=0, skipna=True)
print(f'Skew: {standard_error_skew}')
plt.hist(train_df['standard_error'], edgecolor='black', bins=50) | code |
73082288/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
excerpt1 = train_df['excerpt'].min()
print(excerpt1) | code |
73082288/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost as xgb
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
target_mean = train_df['target'].mean()
target_median = train_df['target'].median()
target_std = train_df['target'].std()
target_min = train_df['target'].min()
target_25 = np.percentile(train_df['target'], 25)
target_50 = np.percentile(train_df['target'], 50)
target_75 = np.percentile(train_df['target'], 75)
target_max = train_df['target'].max()
target_skew = train_df['target'].skew(axis=0, skipna=True)
standard_error_mean = train_df['standard_error'].mean()
standard_error_median = train_df['standard_error'].median()
standard_error_std = train_df['standard_error'].std()
standard_error_min = train_df['standard_error'].min()
standard_error_25 = np.percentile(train_df['standard_error'], 25)
standard_error_50 = np.percentile(train_df['standard_error'], 50)
standard_error_75 = np.percentile(train_df['standard_error'], 75)
standard_error_max = train_df['standard_error'].max()
standard_error_skew = train_df['target'].skew(axis=0, skipna=True)
text = train_df.excerpt[0]
wordcloud = WordCloud().generate(text)
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color='white').generate(text)
wordcloud.generate(' '.join(train_df['excerpt_preprocessed']))
plt.axis('off')
def training(model, X_train, y_train, X_test, y_test):
model = make_pipeline(TfidfVectorizer(binary=True, ngram_range=(1, 1)), model)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
MSE = mse(y_test, y_pred)
xg = xgb.XGBRegressor(objective='reg:squarederror', colsample_bytree=0.3, learning_rate=0.1, max_depth=5, alpha=10, n_estimators=10)
ridge = Ridge(fit_intercept=True, normalize=False)
lr = LinearRegression()
m = [xg, ridge, lr]
mn = ['XGBoost Regression', 'Ridge Regression', 'Linear Regression']
print('Model:', mn)
print('Mean Squared Error:', MSE)
X = train_df['excerpt_preprocessed'].values
y = train_df['target'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
for i in range(0, len(m)):
training(model=m[i], X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test) | code |
73082288/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
print('target Variable')
print('----------')
target_mean = train_df['target'].mean()
print(f'Mean: {target_mean}')
target_median = train_df['target'].median()
print(f'Median: {target_median}')
target_std = train_df['target'].std()
print(f'Standard Deviation: {target_std}')
target_min = train_df['target'].min()
print(f'Minimum Value: {target_min}')
target_25 = np.percentile(train_df['target'], 25)
print(f'25th Percentile: {target_25}')
target_50 = np.percentile(train_df['target'], 50)
print(f'50th Percentile: {target_50}')
target_75 = np.percentile(train_df['target'], 75)
print(f'75th Percentile: {target_75}')
target_max = train_df['target'].max()
print(f'Maximum Value: {target_max}')
target_skew = train_df['target'].skew(axis=0, skipna=True)
print(f'Skew: {target_skew}')
plt.hist(train_df['target'], edgecolor='black', bins=50) | code |
73082288/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
train_df.head() | code |
73082288/cell_10 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
target_mean = train_df['target'].mean()
target_median = train_df['target'].median()
target_std = train_df['target'].std()
target_min = train_df['target'].min()
target_25 = np.percentile(train_df['target'], 25)
target_50 = np.percentile(train_df['target'], 50)
target_75 = np.percentile(train_df['target'], 75)
target_max = train_df['target'].max()
target_skew = train_df['target'].skew(axis=0, skipna=True)
standard_error_mean = train_df['standard_error'].mean()
standard_error_median = train_df['standard_error'].median()
standard_error_std = train_df['standard_error'].std()
standard_error_min = train_df['standard_error'].min()
standard_error_25 = np.percentile(train_df['standard_error'], 25)
standard_error_50 = np.percentile(train_df['standard_error'], 50)
standard_error_75 = np.percentile(train_df['standard_error'], 75)
standard_error_max = train_df['standard_error'].max()
standard_error_skew = train_df['target'].skew(axis=0, skipna=True)
text = train_df.excerpt[0]
wordcloud = WordCloud().generate(text)
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color='white').generate(text)
wordcloud.generate(' '.join(train_df['excerpt_preprocessed']))
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show() | code |
73082288/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import pandas as pd
import re
train_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/train.csv')
train_df.drop(train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True)
train_df.reset_index(drop=True, inplace=True)
test_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/test.csv')
submission_df = pd.read_csv('/kaggle/input/commonlitreadabilityprize/sample_submission.csv')
# Preprocess one excerpt (the lexicographically-smallest one, since .min()
# on a string column sorts alphabetically) into a cleaned, lemmatized string.
excerpt1 = train_df['excerpt'].min()
# Keep letters only, lowercase, and tokenize into words.
e = re.sub('[^a-zA-Z]', ' ', excerpt1)
e = e.lower()
e = nltk.word_tokenize(e)
# Perf fix: build the stop-word set once instead of re-evaluating
# set(stopwords.words('english')) for every token inside the comprehension.
stop_words = set(stopwords.words('english'))
e = [word for word in e if word not in stop_words]
# Reduce each surviving token to its lemma and re-join into one string.
lemma = nltk.WordNetLemmatizer()
e = [lemma.lemmatize(word) for word in e]
e = ' '.join(e)
print(e)
72085616/cell_63 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape | code |
72085616/cell_21 | [
"text_plain_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
reduced_X_valid | code |
72085616/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
y = data.Price
y.isnull().count()
melb_predictors = data.drop(['Price'], axis=1)
melb_predictors.shape
melb_predictors.dtypes
X = melb_predictors.select_dtypes(exclude=['object'])
X.shape | code |
72085616/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train | code |
72085616/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
data.head() | code |
72085616/cell_57 | [
"text_plain_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True) | code |
72085616/cell_56 | [
"text_plain_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
X_train_full.shape
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing | code |
72085616/cell_79 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
object_cols = list(cat_variables[cat_variables].index)
object_cols
object_cols | code |
72085616/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Score a feature set: fit a small random forest on the training split
    and return the mean absolute error on the validation split.

    Args:
        X_train, X_valid: training / validation feature frames.
        y_train, y_valid: matching target series.

    Returns:
        float: validation MAE (lower is better).
    """
    # Fixed seed and a small forest so repeated comparisons are deterministic.
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
print('MAE from Approach 2 (Imputation):')
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid)) | code |
72085616/cell_33 | [
"text_html_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
cols_with_missing | code |
72085616/cell_20 | [
"text_plain_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
reduced_X_train | code |
72085616/cell_76 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
object_cols = list(cat_variables[cat_variables].index)
object_cols
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
drop_X_train.shape
from sklearn.preprocessing import OrdinalEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
ordinal_encoder = OrdinalEncoder()
label_X_train[object_cols] = ordinal_encoder.fit_transform(X_train[object_cols])
label_X_valid[object_cols] = ordinal_encoder.transform(X_valid[object_cols])
label_X_train | code |
72085616/cell_40 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
imputed_X_train_plus | code |
72085616/cell_29 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
imputed_X_valid | code |
72085616/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_valid | code |
72085616/cell_48 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
y = data.Price
y.isnull().count()
melb_predictors = data.drop(['Price'], axis=1)
melb_predictors.shape
data.shape
y = data.Price
y.shape | code |
72085616/cell_41 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Return the validation mean absolute error of a small random forest
    fitted on the training split — used to compare preprocessing approaches.
    """
    # Deterministic baseline model: 10 trees, fixed random seed.
    forest = RandomForestRegressor(n_estimators=10, random_state=0)
    forest.fit(X_train, y_train)
    return mean_absolute_error(y_valid, forest.predict(X_valid))
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
print('MAE from Approach 3 (An Extension to Imputation):')
print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid)) | code |
72085616/cell_61 | [
"text_plain_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols | code |
72085616/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape | code |
72085616/cell_54 | [
"text_plain_output_1.png"
] | X_valid_full.shape
X_valid_full.columns
X_valid_full.head() | code |
72085616/cell_72 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Score a feature set: fit a small random forest on the training split
    and return the mean absolute error on the validation split.

    Args:
        X_train, X_valid: training / validation feature frames.
        y_train, y_valid: matching target series.

    Returns:
        float: validation MAE (lower is better).
    """
    # Fixed seed and a small forest so repeated comparisons are deterministic.
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
drop_X_train.shape
print('MAE from Approach 1 (Drop categorical variables):')
print(score_dataset(drop_X_train, drop_X_valid, y_train, y_valid)) | code |
72085616/cell_67 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
cat_variables | code |
72085616/cell_69 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
object_cols = list(cat_variables[cat_variables].index)
object_cols | code |
72085616/cell_52 | [
"text_plain_output_1.png"
] | X_valid_full.shape | code |
72085616/cell_64 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
X_train.head() | code |
72085616/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
y = data.Price
y.isnull().count()
melb_predictors = data.drop(['Price'], axis=1)
melb_predictors.shape | code |
72085616/cell_49 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
y = data.Price
y.isnull().count()
melb_predictors = data.drop(['Price'], axis=1)
melb_predictors.shape
melb_predictors.dtypes
X = melb_predictors.select_dtypes(exclude=['object'])
X.shape
X.dtypes
data.shape
y = data.Price
y.shape
X = data.drop(['Price'], axis=1)
X.shape | code |
72085616/cell_18 | [
"text_plain_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing | code |
72085616/cell_51 | [
"text_plain_output_1.png"
] | X_train_full.shape | code |
72085616/cell_68 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
cat_variables[cat_variables] | code |
72085616/cell_59 | [
"text_html_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols | code |
72085616/cell_28 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
imputed_X_train | code |
72085616/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
y = data.Price
y.isnull().count()
melb_predictors = data.drop(['Price'], axis=1)
melb_predictors.shape
melb_predictors.dtypes | code |
72085616/cell_38 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus | code |
72085616/cell_75 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
object_cols = list(cat_variables[cat_variables].index)
object_cols
object_cols | code |
72085616/cell_47 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
y = data.Price
y.isnull().count()
melb_predictors = data.drop(['Price'], axis=1)
melb_predictors.shape
data.shape | code |
72085616/cell_66 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables) | code |
72085616/cell_17 | [
"text_html_output_1.png"
] | cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing | code |
72085616/cell_35 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_plus | code |
72085616/cell_77 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
data.shape
pd.set_option('display.max_columns', None)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=10, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
X_train_full.shape
X_valid_full.shape
X_valid_full.columns
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
cols_with_missing
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
low_cardinality_cols
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
numerical_cols
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.shape
cat_variables = X_train.dtypes == 'object'
type(cat_variables)
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
drop_X_train.shape
from sklearn.preprocessing import OrdinalEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
print('MAE from Approach 2 (Ordinal Encoding):')
print(score_dataset(label_X_train, label_X_valid, y_train, y_valid)) | code |
72085616/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=10, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
cols_with_missing
test_cols_with_missing = []
for col in X_train.columns:
if X_train[col].isnull().any():
test_cols_with_missing.append(col)
test_cols_with_missing
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
print('MAE from Approach 1 (Drop columns with missing values):')
print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid)) | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.