path
stringlengths 13
17
| screenshot_names
listlengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
50239477/cell_58
|
[
"text_html_output_1.png"
] |
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
seed = 27912
filepath = '../input/breast-cancer-wisconsin-data/data.csv'
indexC = 'id'
targetC = 'diagnosis'
dataC = utils.load_data(filepath, indexC, targetC)
dataC.sample(5, random_state=seed)
filepathD = '../input/pima-indians-diabetes-database/diabetes.csv'
targetD = 'Outcome'
dataD = utils.pd.read_csv(filepathD, dtype={'Outcome': 'category'})
dataD.sample(5, random_state=seed)
filepathT_train = '../input/titanic/train.csv'
filepathT_test = '../input/titanic/test.csv'
filepathT_Union = '../input/titanic/gender_submission.csv'
dataT_train = utils.pd.read_csv(filepathT_train)
dataT_test = utils.pd.read_csv(filepathT_test)
dataT_Union = utils.pd.read_csv(filepathT_Union)
dataT = pd.DataFrame(columns=['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], index=range(len(dataT_test) + len(dataT_train) + 1))
w = []
for i in range(len(dataT_train)):
k = i + 1
dataT.iloc[k]['PassengerId'] = dataT_train.iloc[i]['PassengerId']
dataT.iloc[k]['Survived'] = dataT_train.iloc[i]['Survived']
dataT.iloc[k]['Pclass'] = dataT_train.iloc[i]['Pclass']
dataT.iloc[k]['Name'] = dataT_train.iloc[i]['Name']
dataT.iloc[k]['Sex'] = dataT_train.iloc[i]['Sex']
dataT.iloc[k]['Age'] = dataT_train.iloc[i]['Age']
dataT.iloc[k]['SibSp'] = dataT_train.iloc[i]['SibSp']
dataT.iloc[k]['Parch'] = dataT_train.iloc[i]['Parch']
dataT.iloc[k]['Ticket'] = dataT_train.iloc[i]['Ticket']
dataT.iloc[k]['Fare'] = dataT_train.iloc[i]['Fare']
dataT.iloc[k]['Cabin'] = dataT_train.iloc[i]['Cabin']
dataT.iloc[k]['Embarked'] = dataT_train.iloc[i]['Embarked']
for j in range(len(dataT_test)):
i = j + len(dataT_train) + 1
dataT.iloc[i]['PassengerId'] = dataT_test.iloc[j]['PassengerId']
dataT.iloc[i]['Survived'] = dataT_Union.iloc[j]['Survived']
dataT.iloc[i]['Pclass'] = dataT_test.iloc[j]['Pclass']
dataT.iloc[i]['Name'] = dataT_test.iloc[j]['Name']
dataT.iloc[i]['Sex'] = dataT_test.iloc[j]['Sex']
dataT.iloc[i]['Age'] = dataT_test.iloc[j]['Age']
dataT.iloc[i]['SibSp'] = dataT_test.iloc[j]['SibSp']
dataT.iloc[i]['Parch'] = dataT_test.iloc[j]['Parch']
dataT.iloc[i]['Ticket'] = dataT_test.iloc[j]['Ticket']
dataT.iloc[i]['Fare'] = dataT_test.iloc[j]['Fare']
dataT.iloc[i]['Cabin'] = dataT_test.iloc[j]['Cabin']
dataT.iloc[i]['Embarked'] = dataT_test.iloc[j]['Embarked']
dataT = dataT.drop([0], axis=0)
dataT = dataT.drop(['PassengerId'], axis=1)
dataT.sample(5, random_state=seed)
CX, Cy = utils.divide_dataset(dataC, target='diagnosis')
DX, Dy = utils.divide_dataset(dataD, target='Outcome')
TX, Ty = utils.divide_dataset(dataT, target='Survived')
DX_train.sample(5, random_state=seed)
CX_train.sample(5, random_state=seed)
Dy_train.sample(5, random_state=seed)
Cy_train.sample(5, random_state=seed)
CX_test.sample(5, random_state=seed)
Cy_test.sample(5, random_state=seed)
dataC_train = utils.join_dataset(CX_train, Cy_train)
dataC_test = utils.join_dataset(CX_test, Cy_test)
dataD_train = utils.join_dataset(DX_train, Dy_train)
dataD_train.sample(5, random_state=seed)
|
code
|
50239477/cell_16
|
[
"text_html_output_1.png"
] |
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
seed = 27912
filepath = '../input/breast-cancer-wisconsin-data/data.csv'
indexC = 'id'
targetC = 'diagnosis'
dataC = utils.load_data(filepath, indexC, targetC)
filepathD = '../input/pima-indians-diabetes-database/diabetes.csv'
targetD = 'Outcome'
dataD = utils.pd.read_csv(filepathD, dtype={'Outcome': 'category'})
dataD.sample(5, random_state=seed)
|
code
|
50239477/cell_47
|
[
"text_html_output_1.png"
] |
seed = 27912
CX_test.sample(5, random_state=seed)
|
code
|
50239477/cell_35
|
[
"text_plain_output_1.png"
] |
seed = 27912
Dy.sample(5, random_state=seed)
|
code
|
50239477/cell_43
|
[
"text_plain_output_1.png"
] |
seed = 27912
Dy_train.sample(5, random_state=seed)
|
code
|
50239477/cell_31
|
[
"text_html_output_1.png"
] |
seed = 27912
CX.sample(5, random_state=seed)
|
code
|
50239477/cell_46
|
[
"text_html_output_1.png"
] |
seed = 27912
DX_test.sample(5, random_state=seed)
|
code
|
50239477/cell_27
|
[
"text_html_output_1.png"
] |
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
seed = 27912
filepath = '../input/breast-cancer-wisconsin-data/data.csv'
indexC = 'id'
targetC = 'diagnosis'
dataC = utils.load_data(filepath, indexC, targetC)
filepathD = '../input/pima-indians-diabetes-database/diabetes.csv'
targetD = 'Outcome'
dataD = utils.pd.read_csv(filepathD, dtype={'Outcome': 'category'})
filepathT_train = '../input/titanic/train.csv'
filepathT_test = '../input/titanic/test.csv'
filepathT_Union = '../input/titanic/gender_submission.csv'
dataT_train = utils.pd.read_csv(filepathT_train)
dataT_test = utils.pd.read_csv(filepathT_test)
dataT_Union = utils.pd.read_csv(filepathT_Union)
dataT = pd.DataFrame(columns=['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], index=range(len(dataT_test) + len(dataT_train) + 1))
w = []
for i in range(len(dataT_train)):
k = i + 1
dataT.iloc[k]['PassengerId'] = dataT_train.iloc[i]['PassengerId']
dataT.iloc[k]['Survived'] = dataT_train.iloc[i]['Survived']
dataT.iloc[k]['Pclass'] = dataT_train.iloc[i]['Pclass']
dataT.iloc[k]['Name'] = dataT_train.iloc[i]['Name']
dataT.iloc[k]['Sex'] = dataT_train.iloc[i]['Sex']
dataT.iloc[k]['Age'] = dataT_train.iloc[i]['Age']
dataT.iloc[k]['SibSp'] = dataT_train.iloc[i]['SibSp']
dataT.iloc[k]['Parch'] = dataT_train.iloc[i]['Parch']
dataT.iloc[k]['Ticket'] = dataT_train.iloc[i]['Ticket']
dataT.iloc[k]['Fare'] = dataT_train.iloc[i]['Fare']
dataT.iloc[k]['Cabin'] = dataT_train.iloc[i]['Cabin']
dataT.iloc[k]['Embarked'] = dataT_train.iloc[i]['Embarked']
for j in range(len(dataT_test)):
i = j + len(dataT_train) + 1
dataT.iloc[i]['PassengerId'] = dataT_test.iloc[j]['PassengerId']
dataT.iloc[i]['Survived'] = dataT_Union.iloc[j]['Survived']
dataT.iloc[i]['Pclass'] = dataT_test.iloc[j]['Pclass']
dataT.iloc[i]['Name'] = dataT_test.iloc[j]['Name']
dataT.iloc[i]['Sex'] = dataT_test.iloc[j]['Sex']
dataT.iloc[i]['Age'] = dataT_test.iloc[j]['Age']
dataT.iloc[i]['SibSp'] = dataT_test.iloc[j]['SibSp']
dataT.iloc[i]['Parch'] = dataT_test.iloc[j]['Parch']
dataT.iloc[i]['Ticket'] = dataT_test.iloc[j]['Ticket']
dataT.iloc[i]['Fare'] = dataT_test.iloc[j]['Fare']
dataT.iloc[i]['Cabin'] = dataT_test.iloc[j]['Cabin']
dataT.iloc[i]['Embarked'] = dataT_test.iloc[j]['Embarked']
dataT = dataT.drop([0], axis=0)
dataT = dataT.drop(['PassengerId'], axis=1)
dataT.sample(5, random_state=seed)
|
code
|
50239477/cell_36
|
[
"text_plain_output_1.png"
] |
seed = 27912
Ty.sample(5, random_state=seed)
|
code
|
74064119/cell_6
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(18,9))
plt.title('Valor Gasto por Faixa Etária', fontsize=14, fontweight = 'bold')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
sns.violinplot(ax = ax, x="Age", y="Purchase", data=df[['Age','Purchase']].sort_values(by=['Age']), palette="RdBu_r")
g = sns.catplot(x='Purchase', y='Gender', col='Age', data=df.sort_values(by=['Age']), col_wrap=1, orient='h', height=3, aspect=3, palette='Set3', kind='violin', dodge=True, bw=0.2)
|
code
|
74064119/cell_8
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(18,9))
plt.title('Valor Gasto por Faixa Etária', fontsize=14, fontweight = 'bold')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
sns.violinplot(ax = ax, x="Age", y="Purchase", data=df[['Age','Purchase']].sort_values(by=['Age']), palette="RdBu_r")
g = sns.catplot(x="Purchase", y="Gender", col="Age",
data=df.sort_values(by=['Age']), col_wrap=1,
orient="h", height=3, aspect=3, palette="Set3",
kind="violin", dodge=True, bw=.2)
import numpy as np
total = pd.DataFrame(df.Product_ID.value_counts()).head(9)
x = total.index.unique()
y = total.Product_ID
width = 0.8
fig, ax = plt.subplots()
ax.barh(x, y, width, color='#AAAAAA')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
for i, v in enumerate(y):
ax.text(v / 2, i, str(v), color='#111111', fontweight='bold', verticalalignment='center')
ax.invert_yaxis()
fig.set_size_inches(15, 8)
plt.title('Top 9 Produtos mais Comprados', fontsize=14, fontweight='bold')
plt.show()
|
code
|
74064119/cell_3
|
[
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv', delimiter=',')
df.head()
|
code
|
74064119/cell_10
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(18,9))
plt.title('Valor Gasto por Faixa Etária', fontsize=14, fontweight = 'bold')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
sns.violinplot(ax = ax, x="Age", y="Purchase", data=df[['Age','Purchase']].sort_values(by=['Age']), palette="RdBu_r")
g = sns.catplot(x="Purchase", y="Gender", col="Age",
data=df.sort_values(by=['Age']), col_wrap=1,
orient="h", height=3, aspect=3, palette="Set3",
kind="violin", dodge=True, bw=.2)
import numpy as np
total = pd.DataFrame(df.Product_ID.value_counts()).head(9)
x = total.index.unique()
y = total.Product_ID
width = 0.8
fig, ax = plt.subplots()
ax.barh(x, y, width, color = '#AAAAAA')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
for i, v in enumerate(y):
ax.text(v/2 , i , str(v), color='#111111', fontweight='bold', verticalalignment='center')
ax.invert_yaxis()
fig.set_size_inches(15, 8)
plt.title('Top 9 Produtos mais Comprados', fontsize=14, fontweight = 'bold')
plt.show()
occupation_order = list(df['Occupation'].value_counts().head(5).index)
df_target = df[df['Occupation'].isin(occupation_order)].sort_values(by='Age')
plt.figure(figsize=(20, 10))
g = sns.boxplot(x='Occupation', y='Purchase', hue='Age', data=df_target)
plt.title('Valores gastos por faixa etária associados às 5 ocupações mais frequentes\n', fontsize=16)
plt.xlabel('Ocupação')
plt.ylabel('Valor gasto')
plt.legend(loc=1, title='Idade')
plt.ylim(0, 35000)
plt.show()
|
code
|
74064119/cell_12
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(18,9))
plt.title('Valor Gasto por Faixa Etária', fontsize=14, fontweight = 'bold')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
sns.violinplot(ax = ax, x="Age", y="Purchase", data=df[['Age','Purchase']].sort_values(by=['Age']), palette="RdBu_r")
g = sns.catplot(x="Purchase", y="Gender", col="Age",
data=df.sort_values(by=['Age']), col_wrap=1,
orient="h", height=3, aspect=3, palette="Set3",
kind="violin", dodge=True, bw=.2)
import numpy as np
total = pd.DataFrame(df.Product_ID.value_counts()).head(9)
x = total.index.unique()
y = total.Product_ID
width = 0.8
fig, ax = plt.subplots()
ax.barh(x, y, width, color = '#AAAAAA')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
for i, v in enumerate(y):
ax.text(v/2 , i , str(v), color='#111111', fontweight='bold', verticalalignment='center')
ax.invert_yaxis()
fig.set_size_inches(15, 8)
plt.title('Top 9 Produtos mais Comprados', fontsize=14, fontweight = 'bold')
plt.show()
occupation_order = list(df['Occupation'].value_counts().head(5).index)
df_target = df[df['Occupation'].isin(occupation_order)].sort_values(by='Age')
plt.figure(figsize=(20,10))
g = sns.boxplot(x="Occupation", y="Purchase", hue="Age", data=df_target)
plt.title('Valores gastos por faixa etária associados às 5 ocupações mais frequentes\n', fontsize=16)
plt.xlabel('Ocupação')
plt.ylabel('Valor gasto')
plt.legend(loc=1, title='Idade')
plt.ylim(0, 35000)
plt.show()
total = df[df.Purchase > 9000].groupby(['Marital_Status', 'Occupation']).Purchase.sum().reset_index()
labels = total.Occupation.unique().astype(str)
casado = total[total.Marital_Status == 1].reset_index().Purchase / total.groupby('Occupation').sum().reset_index().Purchase
solteiro = total[total.Marital_Status == 0].reset_index().Purchase / total.groupby('Occupation').sum().reset_index().Purchase
width = 0.8
fig, ax = plt.subplots()
ax.bar(labels, casado, width, label='Casado', color='#7473aa')
ax.bar(labels, solteiro, width, label='Solteiro', bottom=casado, color='#f5ac9f')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
ax.legend()
fig.set_size_inches(15, 10)
plt.title('Valor Gasto por Ocupação x Estado Civil', fontsize=14, fontweight='bold')
plt.show()
|
code
|
74064119/cell_5
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv', delimiter=',')
fig, ax = plt.subplots(figsize=(18, 9))
plt.title('Valor Gasto por Faixa Etária', fontsize=14, fontweight='bold')
frame = plt.gca()
frame.spines['right'].set_visible(False)
frame.spines['top'].set_visible(False)
sns.violinplot(ax=ax, x='Age', y='Purchase', data=df[['Age', 'Purchase']].sort_values(by=['Age']), palette='RdBu_r')
|
code
|
89131938/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
|
code
|
89131938/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
|
code
|
89131938/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
89131938/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
|
code
|
89131938/cell_16
|
[
"text_html_output_1.png"
] |
from matplotlib import pyplot
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
EARTH_RADIUS = 6378.137
def haversine(xy1, xy2):
return 2 * EARTH_RADIUS * np.arcsin(np.sqrt(np.sin((xy2[:, 0] - xy1[:, 0]) / 2) ** 2 + np.cos(xy1[:, 0]) * np.cos(xy2[:, 0]) * np.sin((xy2[:, 1] - xy2[:, 1]) / 2)))
train['distance'] = haversine(np.radians(train[['pickup_longitude', 'pickup_latitude']].values), np.radians(train[['dropoff_longitude', 'dropoff_latitude']].values))
test['distance'] = haversine(np.radians(test[['pickup_longitude', 'pickup_latitude']].values), np.radians(test[['dropoff_longitude', 'dropoff_latitude']].values))
pyplot.hist(np.log(train['distance'] + 1e-05), bins=50)
|
code
|
89131938/cell_17
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
|
code
|
89131938/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
|
code
|
89131938/cell_5
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.describe()
|
code
|
90153261/cell_4
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
class Envierment:
def __init__(self, K, horizon):
self.K, self.horizon = (K, horizon)
self.q_values = list()
for k in range(self.K):
self.q_values.append(np.random.randn())
def give_reward(self, k):
if k >= K:
print('Invalid action!')
raise ValueError()
else:
return np.random.randn() + self.q_values[k]
class Agent:
def __init__(self, K, horizon):
self.K, self.horizon = (K, horizon)
self.action, self.reward = (0, 0)
self.result = np.zeros((horizon, 1))
self.result_df = pd.DataFrame()
def take_action(self, k=None):
if k == None:
self.action = np.random.choice(self.K)
else:
self.action = k
return self.action
def record_reward(self, trial, n, reward):
self.reward = reward
if self.result.shape[1] < n + 1:
self.result = np.c_[self.result, np.zeros(self.horizon)]
self.result[n][trial] = self.reward
def turn_result_into_df(self):
self.result = np.cumsum(self.result, axis=0)
for n in range(1, self.horizon):
self.result[n] /= n
self.result_df = pd.DataFrame(self.result, columns=['trial_' + str(x) for x in range(self.result.shape[1])])
K, horizon = (3, 100)
envioronment = Envierment(K, horizon)
agent = Agent(envioronment.K, envioronment.horizon)
trials = 10
Result = np.zeros((horizon, trials))
for trial in range(trials):
for n in range(horizon):
action = agent.take_action()
reward = envioronment.give_reward(action)
agent.record_reward(trial, n, reward)
agent.turn_result_into_df()
fig, ax = plt.subplots(figsize=(20, 10))
for trial in range(trials):
ax.plot(agent.result_df['trial_' + str(trial)])
ax.legend(agent.result_df.columns, loc='upper right')
ax.set(title='Average reward for each trial')
print(f'K: {envioronment.K}')
print(f'action-value: {envioronment.q_values}')
|
code
|
90153261/cell_11
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
### Let's create a simple 'Environment' class and 'Agent' class.
### For every action, the instant reward is sampled from Gaussian distribution with unit standard deviation, each one of which however has different mean.
### For the sake of simplicity, we assume stationarity.
class Envierment:
def __init__(self, K, horizon):
self.K, self.horizon = K, horizon
self.q_values = list()
for k in range(self.K):
self.q_values.append(np.random.randn())
def give_reward(self, k):
if k>= K:
print('Invalid action!')
raise ValueError()
else:
return np.random.randn() + self.q_values[k]
class Agent:
def __init__(self, K, horizon):
self.K, self.horizon = K, horizon
self.action, self.reward = 0, 0
self.result = np.zeros((horizon, 1))
self.result_df = pd.DataFrame()
def take_action(self, k=None):
if k == None:
self.action = np.random.choice(self.K)
else:
self.action = k
return self.action
def record_reward(self, trial, n, reward):
self.reward = reward
if self.result.shape[1]<n+1:
self.result = np.c_[self.result, np.zeros(self.horizon)]
self.result[n][trial] = self.reward
def turn_result_into_df(self):
self.result = np.cumsum(self.result, axis=0)
for n in range(1, self.horizon):
self.result[n] /=n
self.result_df = pd.DataFrame(self.result, columns = ['trial_'+ str(x) for x in range(self.result.shape[1])])
### Instantiate an Envierment object with K=3
K, horizon = 3, 100
envioronment = Envierment(K, horizon)
agent = Agent(envioronment.K, envioronment.horizon)
### The actions (0~K-1) are taken randomly, i.e. with no stretegy.
### We run 10 independent trials and compare them.
trials = 10
Result = np.zeros((horizon, trials))
for trial in range(trials):
for n in range(horizon):
action = agent.take_action()
reward = envioronment.give_reward(action)
agent.record_reward(trial, n, reward)
agent.turn_result_into_df()
### Plot the result
fig, ax = plt.subplots(figsize=(20, 10))
for trial in range(trials):
ax.plot(agent.result_df['trial_'+ str(trial)])
ax.legend(agent.result_df.columns, loc='upper right')
ax.set(title='Average reward for each trial')
print(f'K: {envioronment.K}')
print(f'action-value: {envioronment.q_values}')
### Let's modity 'Environment' class in a way that we can feed the predefined q_values when instantiating.
class Envierment:
def __init__(self, K, horizon):
self.K, self.horizon = K, horizon
self.q_values = list()
for k in range(self.K):
self.q_values.append(np.random.randn())
def give_reward(self, k):
if k>= K:
print('Invalid action!')
raise ValueError()
else:
return np.random.randn() + self.q_values[k]
### Let's also modity 'Agent' class so that it can update the estimate of action-values as it goes through the trials.
class Agent:
def __init__(self, K, horizon):
self.K, self.horizon = K, horizon
self.action, self.reward = 0, 0
self.result = np.zeros((horizon, 1))
self.result_df = pd.DataFrame()
self.Q_values = [0]*self.K
self.num_actions = [0]*self.K
self.step_size = 1
def take_action(self, k=None):
if k == None:
self.action = np.random.choice(self.K)
else:
self.action = k
return self.action
def incremental_update(self, step_size):
self.num_actions[self.action] += 1
if step_size == 'sample_average': self.step_size = (1/self.num_actions[self.action])
else : self.step_size = step_size
self.Q_values[self.action] = self.Q_values[self.action] + self.step_size*(self.reward-self.Q_values[self.action])
def record_reward(self, trial, n, reward, step_size):
self.reward = reward
if self.result.shape[1]<n+1:
self.result = np.c_[self.result, np.zeros(self.horizon)]
self.result[n][trial] = self.reward
self.incremental_update(step_size)
def turn_result_into_df(self):
self.result = np.cumsum(self.result, axis=0)
for n in range(1, self.horizon):
self.result[n] /=n
self.result_df = pd.DataFrame(self.result, columns = ['trial_'+ str(x) for x in range(self.result.shape[1])])
### Instantiate an Envierment object with K=3
K, horizon = 1, 20
true_q_values = np.array([0, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 4, 0, 4, 0, 4, 0, 4, 0]).reshape((horizon, 1))
envioronment = Envierment(K, horizon)
agent = Agent(envioronment.K, envioronment.horizon)
### Set K=1, i.e., we have only one action 0.
### To compare the effect of the step size, let's see how well the agent estimates the action-values depending on different stet sizes
step_sizes = [1/2, 1/8, 1, 'sample_average']
Result = np.zeros((horizon, len(step_sizes)))
Q_values = np.zeros((horizon, len(step_sizes)))
for trial, step_size in enumerate(step_sizes):
for n in range(horizon):
Q_values[n][trial] = np.array(agent.Q_values).squeeze()
action = agent.take_action()
reward = envioronment.give_reward(action)
agent.record_reward(trial, n, reward, step_size)
Q_values = Q_values.T
fig, axes = plt.subplots(1, 4, figsize=(40, 10))
for trial in range(len(step_sizes)):
axes[trial].plot( Q_values[trial] )
# ax.legend(df.columns, loc='upper right')
# ax.set(title='Average reward for each trial')
# print(f'K: {envioronment.K}')
# print(f'action-value: {envioronment.q_values}')
horizon = 20
q_a = [0, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 4, 0, 4, 0, 4, 0, 4, 0]
step_sizes = [1 / 2, 1 / 8, 1]
Q_a = [0]
df = pd.DataFrame(q_a, columns=['q_a'])
for step_size in step_sizes:
for n in range(1, horizon):
reward = np.random.randn() + q_a[n]
Q_a.append(Q_a[-1] + step_size * (reward - Q_a[-1]))
df[f'Q_a_{step_size}'] = Q_a
Q_a = [0]
for n in range(1, horizon):
reward = np.random.randn() + q_a[n]
Q_a.append(Q_a[-1] + 1 / n * (reward - Q_a[-1]))
df[f'Q_a_1/n'] = Q_a
fig, axes = plt.subplots(2, 2, figsize=(20, 12))
axes[0][0].plot(df['q_a'], 'o-')
axes[0][0].plot(df['Q_a_0.5'], 'o-')
axes[0][0].legend(['target', 'estimate'], loc='upper right')
axes[0][1].plot(df['q_a'], 'o-')
axes[0][1].plot(df['Q_a_0.125'], 'o-')
axes[0][1].legend(['target', 'estimate'], loc='upper right')
axes[1][0].plot(df['q_a'], 'o-')
axes[1][0].plot(df['Q_a_1'], 'o-')
axes[1][0].legend(['target', 'estimate'], loc='upper right')
axes[1][1].plot(df['q_a'], 'o-')
axes[1][1].plot(df['Q_a_1/n'], 'o-')
axes[1][1].legend(['target', 'estimate'], loc='upper right')
plt.show()
plt.close()
df
|
code
|
90153261/cell_8
|
[
"text_html_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
### Let's create a simple 'Environment' class and 'Agent' class.
### For every action, the instant reward is sampled from Gaussian distribution with unit standard deviation, each one of which however has different mean.
### For the sake of simplicity, we assume stationarity.
class Envierment:
def __init__(self, K, horizon):
self.K, self.horizon = K, horizon
self.q_values = list()
for k in range(self.K):
self.q_values.append(np.random.randn())
def give_reward(self, k):
if k>= K:
print('Invalid action!')
raise ValueError()
else:
return np.random.randn() + self.q_values[k]
class Agent:
def __init__(self, K, horizon):
self.K, self.horizon = K, horizon
self.action, self.reward = 0, 0
self.result = np.zeros((horizon, 1))
self.result_df = pd.DataFrame()
def take_action(self, k=None):
if k == None:
self.action = np.random.choice(self.K)
else:
self.action = k
return self.action
def record_reward(self, trial, n, reward):
self.reward = reward
if self.result.shape[1]<n+1:
self.result = np.c_[self.result, np.zeros(self.horizon)]
self.result[n][trial] = self.reward
def turn_result_into_df(self):
self.result = np.cumsum(self.result, axis=0)
for n in range(1, self.horizon):
self.result[n] /=n
self.result_df = pd.DataFrame(self.result, columns = ['trial_'+ str(x) for x in range(self.result.shape[1])])
### Instantiate an Envierment object with K=3
K, horizon = 3, 100
envioronment = Envierment(K, horizon)
agent = Agent(envioronment.K, envioronment.horizon)
### The actions (0~K-1) are taken randomly, i.e. with no stretegy.
### We run 10 independent trials and compare them.
trials = 10
Result = np.zeros((horizon, trials))
for trial in range(trials):
for n in range(horizon):
action = agent.take_action()
reward = envioronment.give_reward(action)
agent.record_reward(trial, n, reward)
agent.turn_result_into_df()
### Plot the result
fig, ax = plt.subplots(figsize=(20, 10))
for trial in range(trials):
ax.plot(agent.result_df['trial_'+ str(trial)])
ax.legend(agent.result_df.columns, loc='upper right')
ax.set(title='Average reward for each trial')
print(f'K: {envioronment.K}')
print(f'action-value: {envioronment.q_values}')
class Envierment:
def __init__(self, K, horizon):
self.K, self.horizon = (K, horizon)
self.q_values = list()
for k in range(self.K):
self.q_values.append(np.random.randn())
def give_reward(self, k):
if k >= K:
print('Invalid action!')
raise ValueError()
else:
return np.random.randn() + self.q_values[k]
class Agent:
def __init__(self, K, horizon):
self.K, self.horizon = (K, horizon)
self.action, self.reward = (0, 0)
self.result = np.zeros((horizon, 1))
self.result_df = pd.DataFrame()
self.Q_values = [0] * self.K
self.num_actions = [0] * self.K
self.step_size = 1
def take_action(self, k=None):
if k == None:
self.action = np.random.choice(self.K)
else:
self.action = k
return self.action
def incremental_update(self, step_size):
self.num_actions[self.action] += 1
if step_size == 'sample_average':
self.step_size = 1 / self.num_actions[self.action]
else:
self.step_size = step_size
self.Q_values[self.action] = self.Q_values[self.action] + self.step_size * (self.reward - self.Q_values[self.action])
def record_reward(self, trial, n, reward, step_size):
self.reward = reward
if self.result.shape[1] < n + 1:
self.result = np.c_[self.result, np.zeros(self.horizon)]
self.result[n][trial] = self.reward
self.incremental_update(step_size)
def turn_result_into_df(self):
self.result = np.cumsum(self.result, axis=0)
for n in range(1, self.horizon):
self.result[n] /= n
self.result_df = pd.DataFrame(self.result, columns=['trial_' + str(x) for x in range(self.result.shape[1])])
K, horizon = (1, 20)
true_q_values = np.array([0, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 4, 0, 4, 0, 4, 0, 4, 0]).reshape((horizon, 1))
envioronment = Envierment(K, horizon)
agent = Agent(envioronment.K, envioronment.horizon)
step_sizes = [1 / 2, 1 / 8, 1, 'sample_average']
Result = np.zeros((horizon, len(step_sizes)))
Q_values = np.zeros((horizon, len(step_sizes)))
for trial, step_size in enumerate(step_sizes):
for n in range(horizon):
Q_values[n][trial] = np.array(agent.Q_values).squeeze()
action = agent.take_action()
reward = envioronment.give_reward(action)
agent.record_reward(trial, n, reward, step_size)
Q_values = Q_values.T
fig, axes = plt.subplots(1, 4, figsize=(40, 10))
for trial in range(len(step_sizes)):
axes[trial].plot(Q_values[trial])
|
code
|
34132178/cell_21
|
[
"image_output_1.png"
] |
i = 0
while i != 10:
print('i: ', i)
i += 2
print(i, ' döngü sonunda değerimiz 10')
|
code
|
34132178/cell_13
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt #plt ifadesini önceden tanımlamamıştık. Burada onu tanımlıyoruz.
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # sns'yi de görselleştirme (visualization tool) için kullanıyoruz.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt #plt ifadesini önceden tanımlamamıştık. Burada onu tanımlıyoruz.
import seaborn as sns # sns'yi de görselleştirme (visualization tool) için kullanıyoruz.
#korelasyonu görsel olarak görüntülemek için aşağıdaki işlemi yapıyoruz.
f,ax = plt.subplots(figsize=(32, 32)) #Parantez içindeki değerler bizlere çizim alanının boyutunu gösteriyor.
sns.heatmap(data.corr(), annot=True, linewidths=.7, fmt= '.1f',ax=ax) #sns kodunu görselleştirme için kullanıyoruz.
plt.show() # En altta formülün çıkmasını engelliyor.
data.columns
data.V7.plot(kind='hist', bins=1000, figsize=(12, 5))
plt.show()
|
code
|
34132178/cell_2
|
[
"image_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
34132178/cell_11
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Correlation heatmap of the fraud dataset, then overlaid line plots
# comparing the V7 and V8 features across all rows.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt  # redundant re-import kept from the notebook
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
data.columns
# V7 dotted blue, V8 dash-dotted red, both semi-transparent on a grid.
data.V7.plot(kind='line', color='b', label='V7', linewidth=1, alpha=0.5, grid=True, linestyle=':')
data.V8.plot(color='r', label='V8', linewidth=1, alpha=0.5, grid=True, linestyle='-.')
plt.legend(loc='lower right')
plt.xlabel('V7')
plt.ylabel('V8 Değeri')
plt.title('Credit Card Fraud')
plt.show()
|
code
|
34132178/cell_19
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd  # redundant re-import kept from the notebook
import seaborn as sns

# Heatmap of feature correlations, then a combined boolean filter:
# rows where V7 > 40 AND V6 < 70.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
data.columns
import pandas as pd
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
series = data['V7']          # a pandas Series (single bracket)
data_frame = data[['V7']]    # a one-column DataFrame (double bracket)
# Element-wise logical AND of the two conditions selects the rows.
data[np.logical_and(data['V7'] > 40, data['V6'] < 70)]
|
code
|
34132178/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd

# Pairwise Pearson correlations between all columns of the fraud dataset.
csv_path = '../input/creditcardfraud/creditcard.csv'
data = pd.read_csv(csv_path)
data.corr()
|
code
|
34132178/cell_18
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # redundant re-import kept from the notebook
import seaborn as sns

# Correlation heatmap, then boolean-mask filtering: rows with V7 > 40.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
data.columns
import pandas as pd
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
series = data['V7']          # a pandas Series
data_frame = data[['V7']]    # a one-column DataFrame
x = data['V7'] > 40          # boolean mask over all rows
data[x]
|
code
|
34132178/cell_8
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Annotated heatmap of the feature-correlation matrix on a large canvas.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
|
code
|
34132178/cell_3
|
[
"text_html_output_1.png"
] |
import pandas as pd

# Preview the first ten transactions of the credit-card fraud dataset.
csv_path = '../input/creditcardfraud/creditcard.csv'
data = pd.read_csv(csv_path)
data.head(10)
|
code
|
34132178/cell_14
|
[
"image_output_1.png"
] |
# Demonstrate basic dictionary operations: inspect keys/values, update,
# insert, delete a key, membership test, clear, and unbind the name.
dictionary = {'elma': 'apple', 'üzüm': 'grape'}
print(dictionary.keys())
print(dictionary.values())
dictionary['elma'] = 'apple1'   # overwrite the value of an existing key
print(dictionary)
dictionary['kavun'] = 'melon'   # insert a brand-new key
print(dictionary)
del dictionary['elma']          # remove a single entry
print(dictionary)
print('kavun' in dictionary)    # membership test looks at the keys
dictionary.clear()              # empty the dict; the object still exists
print(dictionary)
del dictionary                  # unbind the name entirely
# BUG FIX: the original printed `dictionary` after deleting it, which
# raised an unhandled NameError and aborted the script.  Demonstrate
# the effect of `del` safely instead.
try:
    print(dictionary)
except NameError:
    deleted = True
    print('NameError: dictionary is no longer defined')
|
code
|
34132178/cell_22
|
[
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # redundant re-import kept from the notebook
import seaborn as sns

# Correlation heatmap, a quick dict-mutation walkthrough, then several
# iteration idioms: while, for, enumerate, dict.items, and iterrows.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
data.columns
dictionary = {'elma': 'apple', 'üzüm': 'grape'}
dictionary['elma'] = 'apple1'
dictionary['kavun'] = 'melon'
del dictionary['elma']
dictionary.clear()
del dictionary
import pandas as pd
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
series = data['V7']
data_frame = data[['V7']]
# Silent counter: steps 0 -> 10 by twos without printing.
i = 0
while i != 10:
    i += 2
lis = [2, 4, 6, 8, 10]
for i in lis:
    print('i değeri: ', i)
print('')
# enumerate yields (position, value) pairs.
for index, value in enumerate(lis):
    print(index, ' : ', value)
print('')
dictionary = {'elma': 'apple', 'kavun': 'melon'}
for key, value in dictionary.items():
    print(key, ' : ', value)
dictionary = {'elma': 'apple', 'kavun': 'melon'}
for key, value in dictionary.items():
    print(value, ' : ', key)
# iterrows yields (index, row-as-Series) pairs; only the first row here.
for index, value in data[['V7']][0:1].iterrows():
    print(index, ' : ', value)
|
code
|
34132178/cell_10
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Correlation heatmap, then list the dataset's column labels.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
data.columns
|
code
|
34132178/cell_12
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Correlation heatmap, then a scatter plot of V7 against V8.
data = pd.read_csv('../input/creditcardfraud/creditcard.csv')
data.corr()
import matplotlib.pyplot as plt
import seaborn as sns
fig, heat_ax = plt.subplots(figsize=(32, 32))
sns.heatmap(data.corr(), ax=heat_ax, annot=True, linewidths=0.7, fmt='.1f')
plt.show()
data.columns
data.plot(kind='scatter', x='V7', y='V8', alpha=0.7, color='b')
plt.xlabel('V7')
plt.ylabel('V8')
plt.title('V7 & V8 Dağılım Grafiği')
plt.show()
|
code
|
34132178/cell_5
|
[
"text_html_output_1.png"
] |
import pandas as pd

# Column dtypes, non-null counts and memory usage of the dataset.
csv_path = '../input/creditcardfraud/creditcard.csv'
data = pd.read_csv(csv_path)
data.info()
|
code
|
129029630/cell_21
|
[
"image_output_1.png"
] |
from fcmeans import FCM
from fcmeans import FCM
from sklearn.metrics import silhouette_score
import itertools
import itertools
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Fuzzy c-means customer segmentation: sweep cluster counts for silhouette
# scores, then fit a 6-cluster model and scatter every 2-D feature pair.
df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
X_numerics = df[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]
ax = sns.pairplot(X_numerics[X_numerics.columns])
data = X_numerics.values
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from fcmeans import FCM
min_clusters = 2
max_clusters = 10
silhouette_scores = []
cluster_numbers = []
for n_clusters in range(min_clusters, max_clusters + 1):
    fcm = FCM(n_clusters=n_clusters)
    fcm.fit(data)
    labels = fcm.predict(data)
    silhouette_avg = silhouette_score(data, labels)
    silhouette_scores.append(silhouette_avg)
    cluster_numbers.append(n_clusters)
# First fit with 5 clusters (superseded by the 6-cluster fit below).
fcmean = FCM(n_clusters=5)
fcmean.fit(data)
cnt = fcmean.centers
pred = fcmean.predict(data)
import itertools
variable_combinations = list(itertools.combinations(range(data.shape[1]), 2))
colors = ['r', 'b', 'g', 'purple', 'orange']
fcmean = FCM(n_clusters=6)
fcmean.fit(data)
cnt = fcmean.centers    # cluster centers, one row per cluster
pred = fcmean.predict(data)
import itertools
variable_combinations = list(itertools.combinations(range(data.shape[1]), 2))
colors = ['r', 'b', 'g', 'purple', 'orange', 'yellow']
for i, (x_index, y_index) in enumerate(variable_combinations):
    # One colored point cloud per cluster, centers drawn as black crosses.
    for cluster_id in range(6):
        plt.scatter(data[pred == cluster_id, x_index], data[pred == cluster_id, y_index], s=10, c=colors[cluster_id])
    plt.scatter(cnt[:, x_index], cnt[:, y_index], s=300, c='black', marker='+')
    plt.xlabel(X_numerics.columns[x_index])
    plt.ylabel(X_numerics.columns[y_index])
    plt.title('Customer Clustering based on ' + X_numerics.columns[x_index] + ' and ' + X_numerics.columns[y_index])
plt.show()
|
code
|
129029630/cell_13
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Pairwise scatter/histogram matrix of the three numeric customer features.
df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
X_numerics = df[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]
ax = sns.pairplot(X_numerics[X_numerics.columns])
|
code
|
129029630/cell_6
|
[
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] |
import pandas as pd

# Load the mall-customers dataset, drop the categorical Gender column,
# and preview three random rows.
csv_path = '/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv'
df = pd.read_csv(csv_path)
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
|
code
|
129029630/cell_2
|
[
"image_output_1.png"
] |
pip install fuzzy-c-means
|
code
|
129029630/cell_11
|
[
"text_html_output_1.png"
] |
import pandas as pd

csv_path = '/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv'
df = pd.read_csv(csv_path)
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
# Report the (rows, columns) shape after dropping Gender.
print('Data shape is', df.shape)
|
code
|
129029630/cell_19
|
[
"text_plain_output_1.png"
] |
from fcmeans import FCM
from fcmeans import FCM
from sklearn.metrics import silhouette_score
import itertools
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Fuzzy c-means with 5 clusters: silhouette sweep first, then scatter
# plots of each 2-D projection with the cluster centers marked.
df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
X_numerics = df[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]
ax = sns.pairplot(X_numerics[X_numerics.columns])
data = X_numerics.values
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from fcmeans import FCM
min_clusters = 2
max_clusters = 10
silhouette_scores = []
cluster_numbers = []
for n_clusters in range(min_clusters, max_clusters + 1):
    fcm = FCM(n_clusters=n_clusters)
    fcm.fit(data)
    labels = fcm.predict(data)
    silhouette_avg = silhouette_score(data, labels)
    silhouette_scores.append(silhouette_avg)
    cluster_numbers.append(n_clusters)
fcmean = FCM(n_clusters=5)
fcmean.fit(data)
cnt = fcmean.centers    # cluster centers, one row per cluster
pred = fcmean.predict(data)
import itertools
variable_combinations = list(itertools.combinations(range(data.shape[1]), 2))
colors = ['r', 'b', 'g', 'purple', 'orange']
for i, (x_index, y_index) in enumerate(variable_combinations):
    # One colored point cloud per cluster, centers drawn as black crosses.
    for cluster_id in range(5):
        plt.scatter(data[pred == cluster_id, x_index], data[pred == cluster_id, y_index], s=10, c=colors[cluster_id])
    plt.scatter(cnt[:, x_index], cnt[:, y_index], s=300, c='black', marker='+')
    plt.xlabel(X_numerics.columns[x_index])
    plt.ylabel(X_numerics.columns[y_index])
    plt.title('Customer Clustering based on ' + X_numerics.columns[x_index] + ' and ' + X_numerics.columns[y_index])
plt.show()
|
code
|
129029630/cell_7
|
[
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] |
import pandas as pd

# Load, drop Gender, preview rows, and count missing values per column.
csv_path = '/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv'
df = pd.read_csv(csv_path)
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
|
code
|
129029630/cell_8
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd

csv_path = '/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv'
df = pd.read_csv(csv_path)
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
# Summary statistics (count, mean, std, quartiles) for numeric columns.
df.describe()
|
code
|
129029630/cell_16
|
[
"text_plain_output_1.png"
] |
from fcmeans import FCM
from fcmeans import FCM
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Sweep cluster counts 2..10 with fuzzy c-means and chart the silhouette
# score per count, to guide the choice of cluster number.
df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
X_numerics = df[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]
ax = sns.pairplot(X_numerics[X_numerics.columns])
data = X_numerics.values
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
from fcmeans import FCM
min_clusters = 2
max_clusters = 10
silhouette_scores = []
cluster_numbers = []
for n_clusters in range(min_clusters, max_clusters + 1):
    fcm = FCM(n_clusters=n_clusters)
    fcm.fit(data)
    labels = fcm.predict(data)
    silhouette_avg = silhouette_score(data, labels)
    silhouette_scores.append(silhouette_avg)
    cluster_numbers.append(n_clusters)
plt.plot(cluster_numbers, silhouette_scores, marker='o')
plt.xlabel('Number of Clusters')
plt.ylabel('Silhouette Score')
plt.title('Silhouette Score for Different Numbers of Clusters')
plt.show()
|
code
|
129029630/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd

csv_path = '/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv'
df = pd.read_csv(csv_path)
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
# True if any column still contains NaNs after dropping Gender.
print('Is there any missing values', df.isnull().sum().any())
|
code
|
129029630/cell_12
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Correlation heatmap of the three numeric customer features.
df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.drop('Gender', inplace=True, axis=1)
df.sample(3)
df.isnull().sum()
X_numerics = df[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]
corr_matrix = X_numerics.corr()
plt.figure(figsize=(15, 10))
sns.heatmap(corr_matrix, annot=True)
plt.show()
|
code
|
49118528/cell_25
|
[
"text_plain_output_1.png"
] |
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
def downcast_dtypes(df):
    """
    Shrink the frame's numeric storage:
    every `float64` column becomes `float32` and
    every `int64` column becomes `int32`.
    Mutates `df` and also returns it for convenience.
    """
    # Partition column names by the two wide dtypes we want to narrow.
    wide_floats = [col for col in df if df[col].dtype == 'float64']
    wide_ints = [col for col in df if df[col].dtype == 'int64']
    df[wide_floats] = df[wide_floats].astype(np.float32)
    df[wide_ints] = df[wide_ints].astype(np.int32)
    return df
def lag_feature(all_data, list_lags, index_cols, cols_to_rename):
    """
    Add lagged copies of `cols_to_rename` to `all_data`.

    For every shift in `list_lags`, the values of `cols_to_rename` from
    `shift` months earlier are merged back in as `<col>_lag_<shift>`
    columns; rows without history get 0.

    Parameters:
        all_data: DataFrame containing `index_cols` + `cols_to_rename`.
        list_lags: iterable of integer month offsets.
        index_cols: merge keys; must include 'date_block_num'.
        cols_to_rename: columns to produce lags for.

    Returns the DataFrame with the lag columns appended.
    """
    shift_range = list_lags
    # BUG FIX: the original iterated via `tqdm_notebook`, which is never
    # imported in this file and would raise NameError when called.
    for month_shift in shift_range:
        train_shift = all_data[index_cols + cols_to_rename].copy()
        # Shifting the month forward makes a past value line up with the
        # current month when merging on the index columns.
        train_shift['date_block_num'] = train_shift['date_block_num'] + month_shift
        foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x
        train_shift = train_shift.rename(columns=foo)
        all_data = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0)
        del train_shift
    return all_data
# Monthly units sold per shop, drawn as a 10x6 grid of unlabeled sparklines.
Monthly_sales = sales.groupby(['date_block_num', 'shop_id'])['item_cnt_day'].sum().reset_index(name='item_cnt_month')
fig, axs = plt.subplots(10, 6)
for shop in range(60):
    per_month = Monthly_sales.loc[Monthly_sales['shop_id'] == shop]
    grid_ax = axs[shop // 6, shop % 6]
    grid_ax.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
    grid_ax.plot(per_month['date_block_num'], per_month['item_cnt_month'])
del Monthly_sales, per_month
# Collapse duplicate shop ids onto their canonical counterparts.
for old_id, new_id in ((0, 57), (1, 58), (10, 11)):
    sales.loc[sales.shop_id == old_id, 'shop_id'] = new_id
    test_data.loc[test_data.shop_id == old_id, 'shop_id'] = new_id
# Drop extreme daily-count outliers.
sales = sales[sales.item_cnt_day < 1001]
# Count test shop-item pairs that never occur in the training history.
temp_df = pd.merge(test_data[['shop_id', 'item_id']], sales[['shop_id', 'item_id']], on=['shop_id', 'item_id'], how='left', indicator='Exist')
temp_var = (temp_df['Exist'] == 'left_only').sum()
# Dense grid: every (shop, item) pair observed within each month.
index_cols = ['shop_id', 'item_id', 'date_block_num']
grid = []
for block_num in sales['date_block_num'].unique():
    cur_shops = sales[sales['date_block_num'] == block_num]['shop_id'].unique()
    cur_items = sales[sales['date_block_num'] == block_num]['item_id'].unique()
    grid.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])), dtype='int32'))
grid = pd.DataFrame(np.vstack(grid), columns=index_cols, dtype=np.int32)
# Monthly target = summed daily counts, clipped to [0, 20] like the metric.
gb = sales.groupby(index_cols, as_index=False).agg({'item_cnt_day': ['sum']})
gb.rename(columns={'sum': 'target'}, inplace=True)
gb.columns = [col[0] if col[-1] == '' else col[-1] for col in gb.columns.values]
all_data = pd.merge(grid, gb, how='left', on=index_cols).fillna(0)
all_data.sort_values(['date_block_num', 'shop_id', 'item_id'], inplace=True)
all_data['target'] = all_data['target'].fillna(0).clip(0, 20)
# Keep only the last 22 months (lag features need history before month 12).
all_data = all_data[all_data['date_block_num'] >= 12]
all_data
|
code
|
49118528/cell_20
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
def lag_feature(all_data, list_lags, index_cols, cols_to_rename):
    """
    Add lagged copies of `cols_to_rename` to `all_data`.

    For every shift in `list_lags`, the values of `cols_to_rename` from
    `shift` months earlier are merged back in as `<col>_lag_<shift>`
    columns; rows without history get 0.

    Parameters:
        all_data: DataFrame containing `index_cols` + `cols_to_rename`.
        list_lags: iterable of integer month offsets.
        index_cols: merge keys; must include 'date_block_num'.
        cols_to_rename: columns to produce lags for.

    Returns the DataFrame with the lag columns appended.
    """
    shift_range = list_lags
    # BUG FIX: the original iterated via `tqdm_notebook`, which is never
    # imported in this file and would raise NameError when called.
    for month_shift in shift_range:
        train_shift = all_data[index_cols + cols_to_rename].copy()
        # Shifting the month forward makes a past value line up with the
        # current month when merging on the index columns.
        train_shift['date_block_num'] = train_shift['date_block_num'] + month_shift
        foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x
        train_shift = train_shift.rename(columns=foo)
        all_data = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0)
        del train_shift
    return all_data
# Monthly units sold per shop, drawn as a 10x6 grid of unlabeled sparklines.
Monthly_sales = sales.groupby(['date_block_num', 'shop_id'])['item_cnt_day'].sum().reset_index(name='item_cnt_month')
fig, axs = plt.subplots(10, 6)
for shop in range(60):
    per_month = Monthly_sales.loc[Monthly_sales['shop_id'] == shop]
    grid_ax = axs[shop // 6, shop % 6]
    grid_ax.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
    grid_ax.plot(per_month['date_block_num'], per_month['item_cnt_month'])
del Monthly_sales, per_month
# Collapse duplicate shop ids onto their canonical counterparts.
for old_id, new_id in ((0, 57), (1, 58), (10, 11)):
    sales.loc[sales.shop_id == old_id, 'shop_id'] = new_id
    test_data.loc[test_data.shop_id == old_id, 'shop_id'] = new_id
# Drop extreme daily-count outliers.
sales = sales[sales.item_cnt_day < 1001]
# Count test shop-item pairs that never occur in the training history.
temp_df = pd.merge(test_data[['shop_id', 'item_id']], sales[['shop_id', 'item_id']], on=['shop_id', 'item_id'], how='left', indicator='Exist')
temp_var = (temp_df['Exist'] == 'left_only').sum()
print('Number of unique shop-item combination in the test set that do not exist in the training set:', temp_var)
|
code
|
49118528/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
sales.head()  # preview the raw daily sales log
|
code
|
49118528/cell_29
|
[
"text_html_output_1.png"
] |
from itertools import product
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
def downcast_dtypes(df):
    """
    Shrink the frame's numeric storage:
    every `float64` column becomes `float32` and
    every `int64` column becomes `int32`.
    Mutates `df` and also returns it for convenience.
    """
    # Partition column names by the two wide dtypes we want to narrow.
    wide_floats = [col for col in df if df[col].dtype == 'float64']
    wide_ints = [col for col in df if df[col].dtype == 'int64']
    df[wide_floats] = df[wide_floats].astype(np.float32)
    df[wide_ints] = df[wide_ints].astype(np.int32)
    return df
def lag_feature(all_data, list_lags, index_cols, cols_to_rename):
    """
    Add lagged copies of `cols_to_rename` to `all_data`.

    For every shift in `list_lags`, the values of `cols_to_rename` from
    `shift` months earlier are merged back in as `<col>_lag_<shift>`
    columns; rows without history get 0.

    Parameters:
        all_data: DataFrame containing `index_cols` + `cols_to_rename`.
        list_lags: iterable of integer month offsets.
        index_cols: merge keys; must include 'date_block_num'.
        cols_to_rename: columns to produce lags for.

    Returns the DataFrame with the lag columns appended.
    """
    shift_range = list_lags
    # BUG FIX: the original iterated via `tqdm_notebook`, which is never
    # imported in this file and would raise NameError when called.
    for month_shift in shift_range:
        train_shift = all_data[index_cols + cols_to_rename].copy()
        # Shifting the month forward makes a past value line up with the
        # current month when merging on the index columns.
        train_shift['date_block_num'] = train_shift['date_block_num'] + month_shift
        foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x
        train_shift = train_shift.rename(columns=foo)
        all_data = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0)
        del train_shift
    return all_data
# Monthly units sold per shop, drawn as a 10x6 grid of unlabeled sparklines.
Monthly_sales = sales.groupby(['date_block_num', 'shop_id'])['item_cnt_day'].sum().reset_index(name='item_cnt_month')
fig, axs = plt.subplots(10, 6)
for shop in range(60):
    per_month = Monthly_sales.loc[Monthly_sales['shop_id'] == shop]
    grid_ax = axs[shop // 6, shop % 6]
    grid_ax.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
    grid_ax.plot(per_month['date_block_num'], per_month['item_cnt_month'])
del Monthly_sales, per_month
# Collapse duplicate shop ids onto their canonical counterparts.
for old_id, new_id in ((0, 57), (1, 58), (10, 11)):
    sales.loc[sales.shop_id == old_id, 'shop_id'] = new_id
    test_data.loc[test_data.shop_id == old_id, 'shop_id'] = new_id
# Drop extreme daily-count outliers.
sales = sales[sales.item_cnt_day < 1001]
# Share of test shop-item pairs that DO have training history.
temp_df = pd.merge(test_data[['shop_id', 'item_id']], sales[['shop_id', 'item_id']], on=['shop_id', 'item_id'], how='left', indicator='Exist')
temp_var = (temp_df['Exist'] == 'left_only').sum()
Leakage_Percentage = (test_data.shape[0] - temp_var) / test_data.shape[0] * 100
# Dense grid: every (shop, item) pair observed within each month.
index_cols = ['shop_id', 'item_id', 'date_block_num']
grid = []
for block_num in sales['date_block_num'].unique():
    cur_shops = sales[sales['date_block_num'] == block_num]['shop_id'].unique()
    cur_items = sales[sales['date_block_num'] == block_num]['item_id'].unique()
    grid.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])), dtype='int32'))
grid = pd.DataFrame(np.vstack(grid), columns=index_cols, dtype=np.int32)
# Monthly target = summed daily counts, clipped to [0, 20] like the metric.
gb = sales.groupby(index_cols, as_index=False).agg({'item_cnt_day': ['sum']})
gb.rename(columns={'sum': 'target'}, inplace=True)
gb.columns = [col[0] if col[-1] == '' else col[-1] for col in gb.columns.values]
all_data = pd.merge(grid, gb, how='left', on=index_cols).fillna(0)
all_data.sort_values(['date_block_num', 'shop_id', 'item_id'], inplace=True)
all_data['target'] = all_data['target'].fillna(0).clip(0, 20)
all_data = all_data[all_data['date_block_num'] >= 12]
all_data
# City feature: first token of the shop name with '!' stripped, label-encoded.
shops['city'] = shops.shop_name.apply(lambda x: str.replace(x, '!', '')).apply(lambda x: x.split(' ')[0])
shops['city_enc'] = LabelEncoder().fit_transform(shops['city'])
shops_data = shops[['shop_id', 'city_enc']]
all_data = pd.merge(all_data, shops_data, how='left', on=['shop_id'])
all_data = pd.merge(all_data, items, how='left', on=['item_id'])
all_data = all_data.drop('item_name', axis=1)
# Coarse category "basket" = first word of the category name, label-encoded.
item_category['basket'] = item_category['item_category_name'].apply(lambda x: str(x).split(' ')[0])
item_category['basket_enc'] = LabelEncoder().fit_transform(item_category['basket'])
item_category = item_category[['item_category_id', 'basket_enc']]
all_data = pd.merge(all_data, item_category, how='left', on=['item_category_id'])
all_data
# Append the test rows and shrink the numeric dtypes to save memory.
all_data = pd.concat([all_data, test_data], ignore_index=True, sort=False, keys=['date_block_num', 'shop_id', 'item_id', 'city_enc', 'item_category_id', 'basket_enc', 'target'])
all_data = downcast_dtypes(all_data)
all_data
|
code
|
49118528/cell_15
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
# Monthly units sold per shop, drawn as a 10x6 grid of unlabeled sparklines.
Monthly_sales = sales.groupby(['date_block_num', 'shop_id'])['item_cnt_day'].sum().reset_index(name='item_cnt_month')
fig, axs = plt.subplots(10, 6)
for shop in range(60):
    per_month = Monthly_sales.loc[Monthly_sales['shop_id'] == shop]
    grid_ax = axs[shop // 6, shop % 6]
    grid_ax.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
    grid_ax.plot(per_month['date_block_num'], per_month['item_cnt_month'])
del Monthly_sales, per_month
|
code
|
49118528/cell_22
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
def lag_feature(all_data, list_lags, index_cols, cols_to_rename):
    """
    Add lagged copies of `cols_to_rename` to `all_data`.

    For every shift in `list_lags`, the values of `cols_to_rename` from
    `shift` months earlier are merged back in as `<col>_lag_<shift>`
    columns; rows without history get 0.

    Parameters:
        all_data: DataFrame containing `index_cols` + `cols_to_rename`.
        list_lags: iterable of integer month offsets.
        index_cols: merge keys; must include 'date_block_num'.
        cols_to_rename: columns to produce lags for.

    Returns the DataFrame with the lag columns appended.
    """
    shift_range = list_lags
    # BUG FIX: the original iterated via `tqdm_notebook`, which is never
    # imported in this file and would raise NameError when called.
    for month_shift in shift_range:
        train_shift = all_data[index_cols + cols_to_rename].copy()
        # Shifting the month forward makes a past value line up with the
        # current month when merging on the index columns.
        train_shift['date_block_num'] = train_shift['date_block_num'] + month_shift
        foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x
        train_shift = train_shift.rename(columns=foo)
        all_data = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0)
        del train_shift
    return all_data
# Monthly units sold per shop, drawn as a 10x6 grid of unlabeled sparklines.
Monthly_sales = sales.groupby(['date_block_num', 'shop_id'])['item_cnt_day'].sum().reset_index(name='item_cnt_month')
fig, axs = plt.subplots(10, 6)
for shop in range(60):
    per_month = Monthly_sales.loc[Monthly_sales['shop_id'] == shop]
    grid_ax = axs[shop // 6, shop % 6]
    grid_ax.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
    grid_ax.plot(per_month['date_block_num'], per_month['item_cnt_month'])
del Monthly_sales, per_month
# Collapse duplicate shop ids onto their canonical counterparts.
for old_id, new_id in ((0, 57), (1, 58), (10, 11)):
    sales.loc[sales.shop_id == old_id, 'shop_id'] = new_id
    test_data.loc[test_data.shop_id == old_id, 'shop_id'] = new_id
# Drop extreme daily-count outliers.
sales = sales[sales.item_cnt_day < 1001]
# Share of test shop-item pairs that DO have training history.
temp_df = pd.merge(test_data[['shop_id', 'item_id']], sales[['shop_id', 'item_id']], on=['shop_id', 'item_id'], how='left', indicator='Exist')
temp_var = (temp_df['Exist'] == 'left_only').sum()
Leakage_Percentage = (test_data.shape[0] - temp_var) / test_data.shape[0] * 100
print('Percentage of shop-item combination in test data that are available in the training set:', Leakage_Percentage)
|
code
|
49118528/cell_27
|
[
"text_plain_output_1.png"
] |
from itertools import product
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Competition inputs: daily sales history, the test pairs to score, and the
# item / item-category / shop lookup tables.
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
test_data = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
item_category = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv')
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv')
def downcast_dtypes(df):
    """
    Shrink the frame's numeric storage:
    every `float64` column becomes `float32` and
    every `int64` column becomes `int32`.
    Mutates `df` and also returns it for convenience.
    """
    # Partition column names by the two wide dtypes we want to narrow.
    wide_floats = [col for col in df if df[col].dtype == 'float64']
    wide_ints = [col for col in df if df[col].dtype == 'int64']
    df[wide_floats] = df[wide_floats].astype(np.float32)
    df[wide_ints] = df[wide_ints].astype(np.int32)
    return df
def lag_feature(all_data, list_lags, index_cols, cols_to_rename):
shift_range = list_lags
for month_shift in tqdm_notebook(shift_range):
train_shift = all_data[index_cols + cols_to_rename].copy()
train_shift['date_block_num'] = train_shift['date_block_num'] + month_shift
foo = lambda x: '{}_lag_{}'.format(x, month_shift) if x in cols_to_rename else x
train_shift = train_shift.rename(columns=foo)
all_data = pd.merge(all_data, train_shift, on=index_cols, how='left').fillna(0)
del train_shift
return all_data
Monthly_sales = sales.groupby(["date_block_num", "shop_id"])['item_cnt_day'].sum().reset_index(name = 'item_cnt_month')
fig, axs = plt.subplots(10, 6)
for i in range(60):
shop_sale_per_month = Monthly_sales.loc[Monthly_sales['shop_id']==i]
axs[i//6,i%6].tick_params(axis='both', which='both', bottom=False, top= False, labelbottom=False, right=False, left=False, labelleft=False)
axs[i//6,i%6].plot(shop_sale_per_month['date_block_num'], shop_sale_per_month['item_cnt_month'])
del Monthly_sales, shop_sale_per_month
sales.loc[sales.shop_id == 0, 'shop_id'] = 57
test_data.loc[test_data.shop_id == 0, 'shop_id'] = 57
sales.loc[sales.shop_id == 1, 'shop_id'] = 58
test_data.loc[test_data.shop_id == 1, 'shop_id'] = 58
sales.loc[sales.shop_id == 10, 'shop_id'] = 11
test_data.loc[test_data.shop_id == 10, 'shop_id'] = 11
sales = sales[sales.item_cnt_day < 1001]
temp_df = pd.merge(test_data[['shop_id', 'item_id']], sales[['shop_id', 'item_id']], on=['shop_id', 'item_id'], how='left', indicator='Exist')
temp_var = (temp_df['Exist'] == 'left_only').sum()
index_cols = ['shop_id', 'item_id', 'date_block_num']
grid = []
for block_num in sales['date_block_num'].unique():
cur_shops = sales[sales['date_block_num'] == block_num]['shop_id'].unique()
cur_items = sales[sales['date_block_num'] == block_num]['item_id'].unique()
grid.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])), dtype='int32'))
grid = pd.DataFrame(np.vstack(grid), columns=index_cols, dtype=np.int32)
gb = sales.groupby(index_cols, as_index=False).agg({'item_cnt_day': ['sum']})
gb.rename(columns={'sum': 'target'}, inplace=True)
gb.columns = [col[0] if col[-1] == '' else col[-1] for col in gb.columns.values]
all_data = pd.merge(grid, gb, how='left', on=index_cols).fillna(0)
all_data.sort_values(['date_block_num', 'shop_id', 'item_id'], inplace=True)
all_data['target'] = all_data['target'].fillna(0).clip(0, 20)
all_data = all_data[all_data['date_block_num'] >= 12]
all_data
shops['city'] = shops.shop_name.apply(lambda x: str.replace(x, '!', '')).apply(lambda x: x.split(' ')[0])
shops['city_enc'] = LabelEncoder().fit_transform(shops['city'])
shops_data = shops[['shop_id', 'city_enc']]
all_data = pd.merge(all_data, shops_data, how='left', on=['shop_id'])
all_data = pd.merge(all_data, items, how='left', on=['item_id'])
all_data = all_data.drop('item_name', axis=1)
item_category['basket'] = item_category['item_category_name'].apply(lambda x: str(x).split(' ')[0])
item_category['basket_enc'] = LabelEncoder().fit_transform(item_category['basket'])
item_category = item_category[['item_category_id', 'basket_enc']]
all_data = pd.merge(all_data, item_category, how='left', on=['item_category_id'])
all_data
|
code
|
49118528/cell_5
|
[
"text_html_output_1.png"
] |
import lightgbm as lgb
import numpy as np
import pandas as pd

# Report the version of each core library used by this notebook.
for module in (np, pd, lgb):
    print(f'{module.__name__} {module.__version__}')
|
code
|
1003897/cell_21
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
# How does the train / test split depend on the creation date?
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.set_index('created', drop=True)
all_agg_df = all_agg_df.groupby('train').resample('1D').size().transpose()
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = all_agg_df.plot.bar(ax=ax, stacked=True)
ax.set_xticklabels(all_agg_df.index.strftime('%a %b %d'))
ax.set_title('All listings creation date and train/test split');
all_agg_df.head()
all_agg_df = all_df.copy()
all_agg_df['dayofweek'] = all_df['created'].dt.dayofweek
all_agg_df = all_agg_df.groupby('dayofweek').size()
all_agg_df.plot.bar(title='All row creation by day of week')
|
code
|
1003897/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import seaborn as sns
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.distplot, 'bathrooms')
g = sns.FacetGrid(all_df[all_df['bathrooms'] < 10], col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bathrooms')
|
code
|
1003897/cell_25
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.distplot, 'bathrooms')
g = sns.FacetGrid(all_df[all_df['bathrooms'] < 10], col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bathrooms')
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bedrooms')
g = sns.FacetGrid(all_df, col='train', sharex=True, sharey=True, size=5)
g = g.map(sns.distplot, 'bathrooms')
# How does the train / test split depend on the creation date?
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.set_index('created', drop=True)
all_agg_df = all_agg_df.groupby('train').resample('1D').size().transpose()
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = all_agg_df.plot.bar(ax=ax, stacked=True)
ax.set_xticklabels(all_agg_df.index.strftime('%a %b %d'))
ax.set_title('All listings creation date and train/test split');
all_agg_df.head()
all_agg_df = all_df.copy()
all_agg_df['dayofweek'] = all_df['created'].dt.dayofweek
all_agg_df = all_agg_df.groupby('dayofweek').size()
all_agg_df = all_df.copy()
all_agg_df['hour'] = all_df['created'].dt.hour
all_agg_df = all_agg_df.groupby('hour').size()
# Separate line plot for day vs hour of day creation
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.reset_index()
all_agg_df['dayofweek'] = all_agg_df['created'].dt.weekday_name
all_agg_df['hour'] = all_agg_df['created'].dt.hour
all_agg_df = all_agg_df.groupby(['dayofweek', 'hour']).size().reset_index(name="count")
# all_agg_df = all_agg_df[['dayofweek', 'hour', 'checkouts']]
all_agg_df = all_agg_df.pivot_table(values='count', index='hour', columns='dayofweek')
all_agg_df = all_agg_df[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']]
day_palette = sns.color_palette("hls", 7) # Need to have 7 distinct colours
fig, ax = plt.subplots(1,1, figsize=(16,10))
all_agg_df.plot.line(ax=ax, linewidth=3, color=day_palette, title="Created by hour and day");
plot_df = train_df.copy()
plot_df['day'] = plot_df['created'].dt.dayofweek
plot_df['hour'] = plot_df['created'].dt.hour
sns.boxplot(data=plot_df, x='interest_level', y='day')
|
code
|
1003897/cell_4
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
print('Train DF Shape: {}, %age: {:.1f}'.format(train_df.shape, n_train_pct))
print('Test DF Shape: {}, %age: {:.1f}'.format(test_df.shape, n_test_pct))
|
code
|
1003897/cell_23
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
# Stack train and test so their distributions can be compared side by side.
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()  # train rows carry a label
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
# BUG FIX: seaborn renamed FacetGrid's `size` parameter to `height` and has
# since removed `size`; use `height` throughout.
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, height=5)
g = g.map(sns.distplot, 'bathrooms')
g = sns.FacetGrid(all_df[all_df['bathrooms'] < 10], col='test', sharex=True, sharey=False, height=5)
g = g.map(sns.countplot, 'bathrooms')
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, height=5)
g = g.map(sns.countplot, 'bedrooms')
g = sns.FacetGrid(all_df, col='train', sharex=True, sharey=True, height=5)
g = g.map(sns.distplot, 'bathrooms')
# How does the train / test split depend on the creation date?
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.set_index('created', drop=True)
all_agg_df = all_agg_df.groupby('train').resample('1D').size().transpose()
fig, ax = plt.subplots(1, 1, figsize=(18, 10))
ax = all_agg_df.plot.bar(ax=ax, stacked=True)
ax.set_xticklabels(all_agg_df.index.strftime('%a %b %d'))
ax.set_title('All listings creation date and train/test split');
all_agg_df.head()
all_agg_df = all_df.copy()
all_agg_df['dayofweek'] = all_df['created'].dt.dayofweek
all_agg_df = all_agg_df.groupby('dayofweek').size()
all_agg_df = all_df.copy()
all_agg_df['hour'] = all_df['created'].dt.hour
all_agg_df = all_agg_df.groupby('hour').size()
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.reset_index()
# BUG FIX: Series.dt.weekday_name was removed in pandas 1.0; dt.day_name()
# returns the same weekday-name strings ('Monday', ...).
all_agg_df['dayofweek'] = all_agg_df['created'].dt.day_name()
all_agg_df['hour'] = all_agg_df['created'].dt.hour
all_agg_df = all_agg_df.groupby(['dayofweek', 'hour']).size().reset_index(name='count')
all_agg_df = all_agg_df.pivot_table(values='count', index='hour', columns='dayofweek')
all_agg_df = all_agg_df[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']]
day_palette = sns.color_palette('hls', 7)  # 7 distinct colours, one per weekday
fig, ax = plt.subplots(1, 1, figsize=(16, 10))
all_agg_df.plot.line(ax=ax, linewidth=3, color=day_palette, title='Created by hour and day')
|
code
|
1003897/cell_20
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.set_index('created', drop=True)
all_agg_df = all_agg_df.groupby('train').resample('1D').size().transpose()
fig, ax = plt.subplots(1, 1, figsize=(18, 10))
ax = all_agg_df.plot.bar(ax=ax, stacked=True)
ax.set_xticklabels(all_agg_df.index.strftime('%a %b %d'))
ax.set_title('All listings creation date and train/test split')
all_agg_df.head()
|
code
|
1003897/cell_19
|
[
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.distplot, 'bathrooms')
g = sns.FacetGrid(all_df[all_df['bathrooms'] < 10], col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bathrooms')
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bedrooms')
g = sns.FacetGrid(all_df, col='train', sharex=True, sharey=True, size=5)
g = g.map(sns.distplot, 'bathrooms')
|
code
|
1003897/cell_15
|
[
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.distplot, 'bathrooms')
g = sns.FacetGrid(all_df[all_df['bathrooms'] < 10], col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bathrooms')
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bedrooms')
sns.jointplot(data=all_df[all_df['train']], x='bedrooms', y='bathrooms', kind='reg')
|
code
|
1003897/cell_24
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
plot_df = train_df.copy()
plot_df['day'] = plot_df['created'].dt.dayofweek
plot_df['hour'] = plot_df['created'].dt.hour
plot_df.groupby(['day', 'interest_level']).size().plot.bar()
|
code
|
1003897/cell_14
|
[
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.distplot, 'bathrooms')
g = sns.FacetGrid(all_df[all_df['bathrooms'] < 10], col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bathrooms')
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.countplot, 'bedrooms')
|
code
|
1003897/cell_22
|
[
"text_html_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
# How does the train / test split depend on the creation date?
all_agg_df = all_df.copy()
all_agg_df = all_agg_df.set_index('created', drop=True)
all_agg_df = all_agg_df.groupby('train').resample('1D').size().transpose()
fig, ax = plt.subplots(1,1, figsize=(18,10))
ax = all_agg_df.plot.bar(ax=ax, stacked=True)
ax.set_xticklabels(all_agg_df.index.strftime('%a %b %d'))
ax.set_title('All listings creation date and train/test split');
all_agg_df.head()
all_agg_df = all_df.copy()
all_agg_df['dayofweek'] = all_df['created'].dt.dayofweek
all_agg_df = all_agg_df.groupby('dayofweek').size()
all_agg_df = all_df.copy()
all_agg_df['hour'] = all_df['created'].dt.hour
all_agg_df = all_agg_df.groupby('hour').size()
all_agg_df.plot.bar(title='All row creation by hour of day', figsize=(10, 6))
|
code
|
1003897/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import seaborn as sns
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
g = sns.FacetGrid(all_df, col='test', sharex=True, sharey=False, size=5)
g = g.map(sns.distplot, 'bathrooms')
|
code
|
1003897/cell_5
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
INPUT_DIR = '../input/'
TRAIN_FILE = 'train.json'
TEST_FILE = 'test.json'
train_df = pd.read_json(INPUT_DIR + TRAIN_FILE)
test_df = pd.read_json(INPUT_DIR + TEST_FILE)
all_df = pd.concat((train_df, test_df), axis=0)
all_df['train'] = all_df['interest_level'].notnull()
all_df['test'] = all_df['interest_level'].isnull()
n_train = train_df.shape[0]
n_test = test_df.shape[0]
n_total = n_train + n_test
n_train_pct = n_train / n_total * 100.0
n_test_pct = n_test / n_total * 100.0
def print_df_info(df, name):
    """Print a labelled summary of a DataFrame.

    Emits three sections: dtype/memory info, per-column null counts,
    and descriptive statistics, each headed with *name*.
    """
    header_and_body = [
        ('\n{} Info:\n', df.info),
        ('\n{} Null info by column:\n', lambda: df.isnull().sum(axis=0)),
        ('\n{} Statistical Description:\n', df.describe),
    ]
    for header, produce in header_and_body:
        print(header.format(name))
        # df.info() prints itself and returns None, matching the original
        # print(df.info()) output exactly.
        print(produce())
print_df_info(train_df, 'Train')
print_df_info(test_df, 'Test')
|
code
|
105193321/cell_21
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
plt.figure(figsize=(20, 7))
sns.countplot(x=airline_data['Age'], hue=airline_data['Type of Travel'], palette='rocket_r')
|
code
|
105193321/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
|
code
|
105193321/cell_9
|
[
"text_plain_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
|
code
|
105193321/cell_25
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
sns.histplot(x='Type of Travel', data=airline_data, color='seagreen')
|
code
|
105193321/cell_56
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
df_airline = pd.DataFrame({'Departure Delay': airline_data['Departure Delay'], 'Arrival Delay': airline_data['Arrival Delay']})
ax = df_airline.plot.kde()
for i, row in airline_data.iterrows():
value = row['Arrival Delay']
if pd.isnull(value):
airline_data.loc[:, 'Arrival Delay'][i] = airline_data.loc[:, 'Departure Delay'][i]
airline_data.isnull().sum()
airline_data.info()
|
code
|
105193321/cell_34
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
cols = ['Departure and Arrival Time Convenience', 'Ease of Online Booking', 'Check-in Service', 'Online Boarding', 'Gate Location', 'On-board Service', 'Seat Comfort', 'Leg Room Service', 'Cleanliness', 'Food and Drink', 'In-flight Service', 'In-flight Wifi Service', 'In-flight Entertainment', 'Baggage Handling']
def create_plot_pivot(df, x_column):
    """Count rows per (*x_column*, Satisfaction) pair and widen to a
    pivot table (index = *x_column*, one column per Satisfaction level)
    suitable for stacked-bar plotting."""
    counts = df.groupby([x_column, 'Satisfaction']).size()
    wide = counts.reset_index().pivot(columns='Satisfaction', index=x_column, values=0)
    return wide
fig, ax = plt.subplots(7, 2, figsize=(20, 40))
axe = ax.ravel()
for i in range(14):
create_plot_pivot(airline_data, cols[i]).plot(kind='bar', stacked=True, ax=axe[i])
plt.xlabel(cols[i])
axe[i].set_ylabel('Count of Passengers')
fig.show()
|
code
|
105193321/cell_23
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
plt.figure(figsize=(20, 7))
sns.countplot(x=airline_data['Age'], hue=airline_data['Class'], palette='cubehelix')
|
code
|
105193321/cell_55
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
df_airline = pd.DataFrame({'Departure Delay': airline_data['Departure Delay'], 'Arrival Delay': airline_data['Arrival Delay']})
ax = df_airline.plot.kde()
for i, row in airline_data.iterrows():
value = row['Arrival Delay']
if pd.isnull(value):
airline_data.loc[:, 'Arrival Delay'][i] = airline_data.loc[:, 'Departure Delay'][i]
airline_data.isnull().sum()
airline_data.head()
|
code
|
105193321/cell_40
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import seaborn as sns

# Load the passenger-satisfaction survey and its data dictionary.
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1, 5], :]
d.style.background_gradient(cmap='viridis')  # yellow marks the maximum
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
# Compare the two delay distributions before imputing one from the other.
df_airline = pd.DataFrame({'Departure Delay': airline_data['Departure Delay'], 'Arrival Delay': airline_data['Arrival Delay']})
ax = df_airline.plot.kde()
# BUG FIX: the original iterrows loop assigned via chained indexing
# (airline_data.loc[:, 'Arrival Delay'][i] = ...), which writes into a
# temporary copy and is not guaranteed to modify the frame
# (pandas SettingWithCopy). Impute missing arrival delays from the
# matching departure delay with a vectorized fillna instead.
airline_data['Arrival Delay'] = airline_data['Arrival Delay'].fillna(airline_data['Departure Delay'])
|
code
|
105193321/cell_29
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
sns.histplot(x='Satisfaction', data=airline_data, color='#ffab8d')
|
code
|
105193321/cell_26
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
sns.histplot(x='Class', data=airline_data, color='purple')
|
code
|
105193321/cell_41
|
[
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
df_airline = pd.DataFrame({'Departure Delay': airline_data['Departure Delay'], 'Arrival Delay': airline_data['Arrival Delay']})
ax = df_airline.plot.kde()
for i, row in airline_data.iterrows():
value = row['Arrival Delay']
if pd.isnull(value):
airline_data.loc[:, 'Arrival Delay'][i] = airline_data.loc[:, 'Departure Delay'][i]
airline_data.isnull().sum()
|
code
|
105193321/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
|
code
|
105193321/cell_19
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
plt.figure(figsize=(20, 7))
sns.countplot(x=airline_data['Age'], hue=airline_data['Customer Type'], palette='Set2')
|
code
|
105193321/cell_18
|
[
"text_html_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
round(airline_data['Gender'].value_counts() / airline_data.shape[0] * 100, 2).plot.pie(autopct='%1.1f%%', figsize=(7, 7), explode=(0.02, 0.02), colors=sns.color_palette('pastel'))
|
code
|
105193321/cell_8
|
[
"text_plain_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.head()
|
code
|
105193321/cell_15
|
[
"text_plain_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1, 5], :]
d.style.background_gradient(cmap='viridis')
|
code
|
105193321/cell_38
|
[
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
df_airline = pd.DataFrame({'Departure Delay': airline_data['Departure Delay'], 'Arrival Delay': airline_data['Arrival Delay']})
ax = df_airline.plot.kde()
|
code
|
105193321/cell_17
|
[
"text_plain_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
|
code
|
105193321/cell_43
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
import seaborn as sns

# Load the passenger-satisfaction survey and its data dictionary.
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1, 5], :]
d.style.background_gradient(cmap='viridis')  # yellow marks the maximum
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
# Compare the two delay distributions before imputing one from the other.
df_airline = pd.DataFrame({'Departure Delay': airline_data['Departure Delay'], 'Arrival Delay': airline_data['Arrival Delay']})
ax = df_airline.plot.kde()
# BUG FIX: the original iterrows loop assigned via chained indexing
# (airline_data.loc[:, 'Arrival Delay'][i] = ...), which writes into a
# temporary copy and is not guaranteed to modify the frame
# (pandas SettingWithCopy). Impute missing arrival delays from the
# matching departure delay with a vectorized fillna instead.
airline_data['Arrival Delay'] = airline_data['Arrival Delay'].fillna(airline_data['Departure Delay'])
airline_data.isnull().sum()  # verify the imputation removed the nulls
airline_data.info()
|
code
|
105193321/cell_31
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
|
code
|
105193321/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.info()
|
code
|
105193321/cell_27
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
sns.histplot(x='Customer Type', data=airline_data, color='steelblue')
|
code
|
105193321/cell_36
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import seaborn as sns
airline_data = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
data_dic = pd.read_csv('../input/airline-passenger-satisfaction/data_dictionary.csv')
airline_data.shape
airline_data.isnull().sum()
airline_data.duplicated().sum()
des = airline_data.describe()
d = des.iloc[[1,5],:]
d.style.background_gradient(cmap='viridis')
# YELLOW represent the MAX.
airline_data.nunique()
airline_data.columns
airline_data.isnull().sum()
|
code
|
17116059/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
df = pd.read_csv('../input/raw_lemonade_data.csv')
df['Revenue'] = df['Price'] * df['Sales']
df.head()
|
code
|
17116059/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
print(quartet)
|
code
|
17116059/cell_16
|
[
"text_html_output_1.png"
] |
import numpy as np # linear algebra library
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
df = pd.read_csv('../input/raw_lemonade_data.csv')
df['Revenue'] = df['Price'] * df['Sales']
df['Price'] = df.Price.str.replace('$', '').replace(' ', '')
df.Price = df.Price.astype(np.float64)
df.Revenue = df.Price * df.Sales
df
|
code
|
17116059/cell_14
|
[
"text_plain_output_1.png"
] |
import numpy as np # linear algebra library
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
df = pd.read_csv('../input/raw_lemonade_data.csv')
df['Revenue'] = df['Price'] * df['Sales']
df['Price'] = df.Price.str.replace('$', '').replace(' ', '')
df.Price = df.Price.astype(np.float64)
print(df.dtypes)
|
code
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.