path (stringlengths 13-17) | screenshot_names (listlengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses, 1 value) |
---|---|---|---|
323526/cell_9
|
[
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
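# date_y spans roughly three years, so one third of its rows corresponds to one year in the per-year plots below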
i = int(len(date_y) / 3)
date_y[:i].plot(secondary_y='Frequency', figsize=(20, 5), title='date_y Year 1')
date_y[i:2 * i].plot(secondary_y='Frequency', figsize=(20, 5), title='date_y Year 2')
date_y[2 * i:].plot(secondary_y='Frequency', figsize=(20, 5), title='date_y Year 3')
|
code
|
323526/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_x_freq.plot(secondary_y='Testing set', figsize=(20, 8), title='Comparison of date_x distribution between training/testing set')
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
date_y_freq[:i].plot(secondary_y='Testing set', figsize=(20, 8), title='Comparison of date_y distribution between training/testing set (first year)')
date_y_freq[2 * i:].plot(secondary_y='Testing set', figsize=(20, 8), title='Comparison of date_y distribution between training/testing set (last year)')
|
code
|
323526/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_x.plot(secondary_y='Frequency', figsize=(20, 10))
|
code
|
323526/cell_16
|
[
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] |
from sklearn.metrics import roc_auc_score
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
from sklearn.metrics import roc_auc_score
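# target-encode date_x and date_y (per-group mean outcome and group size), then score each column as a standalone predictor with AUC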
features = pd.DataFrame()
features['date_x_prob'] = df_train.groupby('date_x')['outcome'].transform('mean')
features['date_y_prob'] = df_train.groupby('date_y')['outcome'].transform('mean')
features['date_x_count'] = df_train.groupby('date_x')['outcome'].transform('count')
features['date_y_count'] = df_train.groupby('date_y')['outcome'].transform('count')
_ = [print(f.ljust(12) + ' AUC: ' + str(round(roc_auc_score(df_train['outcome'], features[f]), 6))) for f in features.columns]
|
code
|
323526/cell_14
|
[
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
print('date_y correlation in year 1: ' + str(np.corrcoef(date_y_freq[:i].fillna(0).T)[0, 1]))
print('date_y correlation in year 2: ' + str(np.corrcoef(date_y_freq[i:2 * i].fillna(0).T)[0, 1]))
print('date_y correlation in year 3: ' + str(np.corrcoef(date_y_freq[2 * i:].fillna(0).T)[0, 1]))
|
code
|
323526/cell_12
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
print('Correlation of date_x distribution in training/testing sets: ' + str(np.corrcoef(date_x_freq.T)[0, 1]))
print('Correlation of date_y distribution in training/testing sets: ' + str(np.corrcoef(date_y_freq.fillna(0).T)[0, 1]))
|
code
|
323526/cell_5
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
for d in ['date_x', 'date_y']:
print('Start of ' + d + ': ' + str(df_train[d].min().date()))
print(' End of ' + d + ': ' + str(df_train[d].max().date()))
print('Range of ' + d + ': ' + str(df_train[d].max() - df_train[d].min()) + '\n')
|
code
|
129040249/cell_21
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
data.head(5)
|
code
|
129040249/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
|
code
|
129040249/cell_9
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.head(5)
|
code
|
129040249/cell_23
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
plt.figure(figsize=(10, 4))
sns.countplot(x='Genre', data=data, order=data['Genre'].value_counts().index)
plt.xticks(rotation='vertical')
plt.title('Genre vs. No. of Games released', fontsize=14)
plt.ylabel('No. of Games', fontsize=12)
plt.xlabel('Genre', fontsize=12)
plt.show()
|
code
|
129040249/cell_30
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
JAPAN_data = data.sort_values(by=['JP_Sales', 'Genre'], ascending=False)
JAPAN_data = JAPAN_data.reset_index()
JAPAN_data.drop(['Rank', 'Name', 'Platform', 'Year', 'Publisher', 'Global_Sales'], axis=1)
JAPAN_data = JAPAN_data.groupby('Genre').sum()
JAPAN_data
|
code
|
129040249/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
|
code
|
129040249/cell_29
|
[
"image_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
JAPAN_data = data.sort_values(by=['JP_Sales', 'Genre'], ascending=False)
JAPAN_data = JAPAN_data.reset_index()
JAPAN_data.drop(['Rank', 'Name', 'Platform', 'Year', 'Publisher', 'Global_Sales'], axis=1)
JAPAN_data = JAPAN_data.groupby('Genre').sum()
|
code
|
129040249/cell_11
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
|
code
|
129040249/cell_19
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
data.describe()
|
code
|
129040249/cell_18
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
data.info()
|
code
|
129040249/cell_28
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
JAPAN_data = data.sort_values(by=['JP_Sales', 'Genre'], ascending=False)
JAPAN_data = JAPAN_data.reset_index()
JAPAN_data.drop(['Rank', 'Name', 'Platform', 'Year', 'Publisher', 'Global_Sales'], axis=1)
|
code
|
129040249/cell_8
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data.head(5)
|
code
|
129040249/cell_15
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
|
code
|
129040249/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
data['Genre'].astype('category')
|
code
|
129040249/cell_24
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
Action_data = data[data['Genre'] == 'Action']
Action_data
|
code
|
129040249/cell_14
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
print('number of duplicate rows:', duplicate_rows_data.shape[0])
|
code
|
129040249/cell_22
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
data.isnull().sum()
duplicate_rows_data = data[data.duplicated()]
data.drop_duplicates()
data['Genre'].value_counts()
|
code
|
129040249/cell_12
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv')
data = pd.read_csv('/kaggle/input/videogamesales/vgsales.csv', index_col='Rank')
data.shape
data.dtypes
|
code
|
122258149/cell_21
|
[
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
import matplotlib.pyplot as plt
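# league_teams: count each club's home appearances in the given league and plot them as a bar chart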
def league_teams(league):
teams=[]
for i in range(0,len(df)):
if df['League'][i] == league:
teams.append(df['HomeTeam'][i])
team_counts = pd.DataFrame(data=teams, columns=['Teams']).value_counts()
team_counts = team_counts.reset_index()
column_name = '{} Teams most played since 2012'.format(league)
team_counts.columns = [column_name, 'Counts']
team_counts = team_counts.plot(kind='bar',x=column_name,stacked=True, figsize=(17,5))
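# plot_team_result_in_season: assemble home/away full-time result (FTR) counts for a team in a given season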
def plot_team_result_in_season(season, team):
home = df[(df['HomeTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
away = df[(df['AwayTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
ploting = pd.DataFrame([home, away], ['home', 'away'])
plot_team_result_in_season('2012-2013', 'Man United')
|
code
|
122258149/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
import matplotlib.pyplot as plt
def league_teams(league):
teams=[]
for i in range(0,len(df)):
if df['League'][i] == league:
teams.append(df['HomeTeam'][i])
team_counts = pd.DataFrame(data=teams, columns=['Teams']).value_counts()
team_counts = team_counts.reset_index()
column_name = '{} Teams most played since 2012'.format(league)
team_counts.columns = [column_name, 'Counts']
team_counts = team_counts.plot(kind='bar',x=column_name,stacked=True, figsize=(17,5))
league_teams('Premier League')
|
code
|
122258149/cell_9
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
c = df['Country'].value_counts()
print(c)
c.plot(kind='bar')
|
code
|
122258149/cell_4
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
df.head()
|
code
|
122258149/cell_30
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
import matplotlib.pyplot as plt
def league_teams(league):
teams=[]
for i in range(0,len(df)):
if df['League'][i] == league:
teams.append(df['HomeTeam'][i])
team_counts = pd.DataFrame(data=teams, columns=['Teams']).value_counts()
team_counts = team_counts.reset_index()
column_name = '{} Teams most played since 2012'.format(league)
team_counts.columns = [column_name, 'Counts']
team_counts = team_counts.plot(kind='bar',x=column_name,stacked=True, figsize=(17,5))
def plot_team_result_in_season(season, team):
home = df[(df['HomeTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
away = df[(df['AwayTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
ploting = pd.DataFrame([home, away], ['home', 'away'])
import seaborn as sns
def home_goals_in_season(team, season):
date = []
goals = []
total_shots = []
shot_target = []
corners = []
for i in range(0, len(df)):
if (df['HomeTeam'][i] == team) & (df['Season'][i] == season):
date.append(df['Date'][i])
goals.append(df['FTHG'][i])
total_shots.append(df['HS'][i])
shot_target.append(df['HST'][i])
corners.append(df['HC'][i])
if i == len(df) - 1:
goals_data = {'Date': date, 'Home goals': goals, 'Home total shots': total_shots, 'Home shot on target': shot_target, 'Home team corners': corners}
home_goals = pd.DataFrame(goals_data).set_index('Date')
sns.set_style('dark')
def away_goals_in_season(team, season):
date = []
goals = []
total_shots = []
shot_target = []
corners = []
for i in range(0, len(df)):
if (df['AwayTeam'][i] == team) & (df['Season'][i] == season):
date.append(df['Date'][i])
goals.append(df['FTAG'][i])
total_shots.append(df['AS'][i])
shot_target.append(df['AST'][i])
corners.append(df['AC'][i])
if i == len(df) - 1:
goals_data = {'Date': date, 'Away goals': goals, 'Away total shots': total_shots, 'Away shot on target': shot_target, 'Away team corners': corners}
away_goals = pd.DataFrame(goals_data).set_index('Date')
sns.set_style('dark')
def home_fouls(team, season):
date = []
fouls = []
yellow_card = []
red_card = []
for i in range(0, len(df)):
if (df['HomeTeam'][i] == team) & (df['Season'][i] == season):
date.append(df['Date'][i])
fouls.append(df['HF'][i])
yellow_card.append(df['HY'][i])
red_card.append(df['HR'][i])
if i == len(df) - 1:
fouls_data = {'Date': date, 'Fouls': fouls, 'Yellow card': yellow_card, 'Red card': red_card}
home_fouls = pd.DataFrame(fouls_data).set_index('Date')
sns.set_style('dark')
home_fouls('Milan', '2019-2020')
|
code
|
122258149/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
122258149/cell_7
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
df.info()
|
code
|
122258149/cell_18
|
[
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
def search_team_result_in_season(season, team):
result = df[(df['Season'] == season) & ((df['AwayTeam'] == team) | (df['HomeTeam'] == team))][['League', 'Season', 'HomeTeam', 'AwayTeam', 'FTR', 'FTHG', 'FTAG']]
return result
search_team_result_in_season('2012-2013', 'Real Madrid')
|
code
|
122258149/cell_24
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
import matplotlib.pyplot as plt
def league_teams(league):
teams=[]
for i in range(0,len(df)):
if df['League'][i] == league:
teams.append(df['HomeTeam'][i])
team_counts = pd.DataFrame(data=teams, columns=['Teams']).value_counts()
team_counts = team_counts.reset_index()
column_name = '{} Teams most played since 2012'.format(league)
team_counts.columns = [column_name, 'Counts']
team_counts = team_counts.plot(kind='bar',x=column_name,stacked=True, figsize=(17,5))
def plot_team_result_in_season(season, team):
home = df[(df['HomeTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
away = df[(df['AwayTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
ploting = pd.DataFrame([home, away], ['home', 'away'])
import seaborn as sns
def home_goals_in_season(team, season):
date = []
goals = []
total_shots = []
shot_target = []
corners = []
for i in range(0, len(df)):
if (df['HomeTeam'][i] == team) & (df['Season'][i] == season):
date.append(df['Date'][i])
goals.append(df['FTHG'][i])
total_shots.append(df['HS'][i])
shot_target.append(df['HST'][i])
corners.append(df['HC'][i])
if i == len(df) - 1:
goals_data = {'Date': date, 'Home goals': goals, 'Home total shots': total_shots, 'Home shot on target': shot_target, 'Home team corners': corners}
home_goals = pd.DataFrame(goals_data).set_index('Date')
sns.set_style('dark')
home_goals_in_season('Real Madrid', '2012-2013')
|
code
|
122258149/cell_10
|
[
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
l = df['League'].value_counts()
print(l)
l.plot(kind='bar')
|
code
|
122258149/cell_27
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/euro-football-data-since-2012/Euro-Football_2012-2023.csv')
drop_index = []
for j in range(0, len(df)):
if (pd.isnull(df['HomeTeam'][j]) == True) | (pd.isnull(df['FTHG'][j]) == True):
drop_index.append(j)
df.drop(drop_index, axis=0, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.drop('id', axis=1, inplace=True)
import matplotlib.pyplot as plt
def league_teams(league):
teams=[]
for i in range(0,len(df)):
if df['League'][i] == league:
teams.append(df['HomeTeam'][i])
team_counts = pd.DataFrame(data=teams, columns=['Teams']).value_counts()
team_counts = team_counts.reset_index()
column_name = '{} Teams most played since 2012'.format(league)
team_counts.columns = [column_name, 'Counts']
team_counts = team_counts.plot(kind='bar',x=column_name,stacked=True, figsize=(17,5))
def plot_team_result_in_season(season, team):
home = df[(df['HomeTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
away = df[(df['AwayTeam'] == team) & (df['Season'] == season)]['FTR'].value_counts()
ploting = pd.DataFrame([home, away], ['home', 'away'])
import seaborn as sns
def home_goals_in_season(team, season):
date = []
goals = []
total_shots = []
shot_target = []
corners = []
for i in range(0, len(df)):
if (df['HomeTeam'][i] == team) & (df['Season'][i] == season):
date.append(df['Date'][i])
goals.append(df['FTHG'][i])
total_shots.append(df['HS'][i])
shot_target.append(df['HST'][i])
corners.append(df['HC'][i])
if i == len(df) - 1:
goals_data = {'Date': date, 'Home goals': goals, 'Home total shots': total_shots, 'Home shot on target': shot_target, 'Home team corners': corners}
home_goals = pd.DataFrame(goals_data).set_index('Date')
sns.set_style('dark')
def away_goals_in_season(team, season):
date = []
goals = []
total_shots = []
shot_target = []
corners = []
for i in range(0, len(df)):
if (df['AwayTeam'][i] == team) & (df['Season'][i] == season):
date.append(df['Date'][i])
goals.append(df['FTAG'][i])
total_shots.append(df['AS'][i])
shot_target.append(df['AST'][i])
corners.append(df['AC'][i])
if i == len(df) - 1:
goals_data = {'Date': date, 'Away goals': goals, 'Away total shots': total_shots, 'Away shot on target': shot_target, 'Away team corners': corners}
away_goals = pd.DataFrame(goals_data).set_index('Date')
sns.set_style('dark')
away_goals_in_season('Barcelona', '2014-2015')
|
code
|
17142199/cell_13
|
[
"text_plain_output_1.png"
] |
import torch
from torch import tensor, nn
import torch.nn.functional as F
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
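# naive log-softmax (divide by the sum of exponentials, then take the log); redefined below in more numerically stable forms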
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
def nll(inp, targ):
return -inp[range(targ.shape[0]), targ].mean()
loss = nll(sm_pred, y_train)
loss
def log_softmax(x):
return x - x.exp().sum(-1, keepdim=True).log()
test_near(nll(log_softmax(pred), y_train), loss)
def log_softmax(x):
return x - x.logsumexp(-1, keepdim=True)
test_near(nll(log_softmax(pred), y_train), loss)
test_near(F.nll_loss(F.log_softmax(pred, -1), y_train), loss)
test_near(F.cross_entropy(pred, y_train), loss)
loss_func = F.cross_entropy
def acc(out, yb):
return (torch.argmax(out, -1) == yb).float().mean()
bs = 64
xb, yb = (x_train[:bs], y_train[:bs])
preds = model(xb)
(loss_func(preds, yb), acc(preds, yb))
lr = 0.5
epochs = 1
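# manual SGD: for each mini-batch, backprop the loss, update every layer's weight and bias in-place, then zero the grads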
for e in range(epochs):
for i in range((n - 1) // bs + 1):
start_i = bs * i
end_i = bs * (i + 1)
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
loss = loss_func(model(xb), yb)
loss.backward()
with torch.no_grad():
for l in model.layers:
if hasattr(l, 'weight'):
l.weight -= l.weight.grad * lr
l.bias -= l.bias.grad * lr
l.weight.grad.zero_()
l.bias.grad.zero_()
(loss_func(model(xb), yb), acc(model(xb), yb))
model = Model(m, nh, 10)
for name, l in model.named_children():
print(f'{name}: {l}')
|
code
|
17142199/cell_4
|
[
"text_plain_output_1.png"
] |
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
|
code
|
17142199/cell_6
|
[
"text_plain_output_1.png"
] |
from torch import tensor, nn
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
|
code
|
17142199/cell_11
|
[
"text_plain_output_1.png"
] |
import torch
from torch import tensor, nn
import torch.nn.functional as F
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
def nll(inp, targ):
return -inp[range(targ.shape[0]), targ].mean()
loss = nll(sm_pred, y_train)
loss
def log_softmax(x):
return x - x.exp().sum(-1, keepdim=True).log()
test_near(nll(log_softmax(pred), y_train), loss)
def log_softmax(x):
return x - x.logsumexp(-1, keepdim=True)
test_near(nll(log_softmax(pred), y_train), loss)
test_near(F.nll_loss(F.log_softmax(pred, -1), y_train), loss)
test_near(F.cross_entropy(pred, y_train), loss)
loss_func = F.cross_entropy
def acc(out, yb):
return (torch.argmax(out, -1) == yb).float().mean()
bs = 64
xb, yb = (x_train[:bs], y_train[:bs])
preds = model(xb)
(loss_func(preds, yb), acc(preds, yb))
lr = 0.5
epochs = 1
for e in range(epochs):
for i in range((n - 1) // bs + 1):
start_i = bs * i
end_i = bs * (i + 1)
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
loss = loss_func(model(xb), yb)
loss.backward()
with torch.no_grad():
for l in model.layers:
if hasattr(l, 'weight'):
l.weight -= l.weight.grad * lr
l.bias -= l.bias.grad * lr
l.weight.grad.zero_()
l.bias.grad.zero_()
(loss_func(model(xb), yb), acc(model(xb), yb))
|
code
|
17142199/cell_7
|
[
"text_plain_output_1.png"
] |
from torch import tensor, nn
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
def nll(inp, targ):
return -inp[range(targ.shape[0]), targ].mean()
loss = nll(sm_pred, y_train)
loss
|
code
|
17142199/cell_15
|
[
"text_plain_output_1.png"
] |
import torch
from torch import tensor, nn
import torch.nn.functional as F
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
def nll(inp, targ):
return -inp[range(targ.shape[0]), targ].mean()
loss = nll(sm_pred, y_train)
loss
def log_softmax(x):
return x - x.exp().sum(-1, keepdim=True).log()
test_near(nll(log_softmax(pred), y_train), loss)
def log_softmax(x):
return x - x.logsumexp(-1, keepdim=True)
test_near(nll(log_softmax(pred), y_train), loss)
test_near(F.nll_loss(F.log_softmax(pred, -1), y_train), loss)
test_near(F.cross_entropy(pred, y_train), loss)
loss_func = F.cross_entropy
def acc(out, yb):
return (torch.argmax(out, -1) == yb).float().mean()
bs = 64
xb, yb = (x_train[:bs], y_train[:bs])
preds = model(xb)
(loss_func(preds, yb), acc(preds, yb))
lr = 0.5
epochs = 1
for e in range(epochs):
for i in range((n - 1) // bs + 1):
start_i = bs * i
end_i = bs * (i + 1)
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
loss = loss_func(model(xb), yb)
loss.backward()
with torch.no_grad():
for l in model.layers:
if hasattr(l, 'weight'):
l.weight -= l.weight.grad * lr
l.bias -= l.bias.grad * lr
l.weight.grad.zero_()
l.bias.grad.zero_()
(loss_func(model(xb), yb), acc(model(xb), yb))
model = Model(m, nh, 10)
def fit():
for e in range(epochs):
for i in range((n - 1) // bs + 1):
start_i = bs * i
end_i = bs * (i + 1)
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
loss = loss_func(model(xb), yb)
loss.backward()
with torch.no_grad():
for p in model.parameters():
p -= p.grad * lr
model.zero_grad()
fit()
(loss_func(model(xb), yb), acc(model(xb), yb))
|
code
|
17142199/cell_3
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
import operator
from pathlib import Path
from IPython.core.debugger import set_trace
from fastai import datasets
import pickle, gzip, math, torch, matplotlib as mpl
import matplotlib.pyplot as plt
from torch import tensor, nn
import torch.nn.functional as F
def test(a, b, cmp, cname=None):
if cname is None:
cname = cmp.__name__
assert cmp(a, b), f'{cname}:\n{a}\n{b}'
def test_eq(a, b):
test(a, b, operator.eq, '==')
def near(a, b):
return torch.allclose(a, b, 0.001, 1e-05)
def test_near(a, b):
test(a, b, near)
MNIST_URL = 'http://deeplearning.net/data/mnist/mnist.pkl'
def get_data():
path = datasets.download_data(MNIST_URL, ext='.gz')
with gzip.open(path, 'rb') as f:
(x_train, y_train), (x_valid, y_valid), _ = pickle.load(f, encoding='latin-1')
return map(tensor, (x_train, y_train, x_valid, y_valid))
def normalize(x, m, s):
return (x - m) / s
|
code
|
17142199/cell_14
|
[
"text_plain_output_1.png"
] |
import torch
from torch import tensor, nn
import torch.nn.functional as F
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
def nll(inp, targ):
return -inp[range(targ.shape[0]), targ].mean()
loss = nll(sm_pred, y_train)
loss
def log_softmax(x):
return x - x.exp().sum(-1, keepdim=True).log()
test_near(nll(log_softmax(pred), y_train), loss)
def log_softmax(x):
return x - x.logsumexp(-1, keepdim=True)
test_near(nll(log_softmax(pred), y_train), loss)
test_near(F.nll_loss(F.log_softmax(pred, -1), y_train), loss)
test_near(F.cross_entropy(pred, y_train), loss)
loss_func = F.cross_entropy
def acc(out, yb):
return (torch.argmax(out, -1) == yb).float().mean()
bs = 64
xb, yb = (x_train[:bs], y_train[:bs])
preds = model(xb)
(loss_func(preds, yb), acc(preds, yb))
lr = 0.5
epochs = 1
for e in range(epochs):
for i in range((n - 1) // bs + 1):
start_i = bs * i
end_i = bs * (i + 1)
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
loss = loss_func(model(xb), yb)
loss.backward()
with torch.no_grad():
for l in model.layers:
if hasattr(l, 'weight'):
l.weight -= l.weight.grad * lr
l.bias -= l.bias.grad * lr
l.weight.grad.zero_()
l.bias.grad.zero_()
(loss_func(model(xb), yb), acc(model(xb), yb))
model = Model(m, nh, 10)
model
|
code
|
17142199/cell_10
|
[
"text_plain_output_1.png"
] |
import torch
from torch import tensor, nn
import torch.nn.functional as F
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
def log_softmax(x):
return (x.exp() / x.exp().sum(-1, keepdim=True)).log()
sm_pred = log_softmax(pred)
(sm_pred.shape, sm_pred[:3])
def nll(inp, targ):
return -inp[range(targ.shape[0]), targ].mean()
loss = nll(sm_pred, y_train)
loss
def log_softmax(x):
return x - x.exp().sum(-1, keepdim=True).log()
test_near(nll(log_softmax(pred), y_train), loss)
def log_softmax(x):
return x - x.logsumexp(-1, keepdim=True)
test_near(nll(log_softmax(pred), y_train), loss)
test_near(F.nll_loss(F.log_softmax(pred, -1), y_train), loss)
test_near(F.cross_entropy(pred, y_train), loss)
loss_func = F.cross_entropy
def acc(out, yb):
return (torch.argmax(out, -1) == yb).float().mean()
bs = 64
xb, yb = (x_train[:bs], y_train[:bs])
preds = model(xb)
(loss_func(preds, yb), acc(preds, yb))
|
code
|
17142199/cell_5
|
[
"text_plain_output_1.png"
] |
from torch import tensor, nn
x_train, y_train, x_valid, y_valid = get_data()
n, m = x_train.shape
c = y_train.max() + 1
nh = 50
(n, m, c, nh)
class Model(nn.Module):
def __init__(self, ni, nh, no):
super().__init__()
self.layers = [nn.Linear(ni, nh), nn.ReLU(), nn.Linear(nh, no)]
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
model = Model(m, nh, 10)
pred = model(x_train)
pred.shape
|
code
|
50221349/cell_9
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import numpy as np # linear algebra
import optuna
import optuna
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn
train = pd.read_csv('/kaggle/input/jane-street-market-prediction/train.csv', nrows=30000)
features = [col for col in list(train.columns) if 'feature' in col]
train = train[train['weight'] != 0]
train['action'] = (train['resp'].values > 0).astype(int)
f_mean = train.mean()
train = train.fillna(f_mean)
X = train.loc[:, features]
y = train.loc[:, 'action']
del train
X = np.array(X)
y = np.array(y)
import optuna
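# Optuna objective: train a LightGBM binary classifier on a random 75/25 split and return hold-out accuracy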
def objective(trial):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
dtrain = lgb.Dataset(X_train, label=y_train)
param = {'objective': 'binary', 'metric': 'binary_logloss', 'verbosity': -1, 'boosting_type': 'gbdt', 'lambda_l1': trial.suggest_float('lambda_l1', 1e-08, 10.0, log=True), 'lambda_l2': trial.suggest_float('lambda_l2', 1e-08, 10.0, log=True), 'num_leaves': trial.suggest_int('num_leaves', 2, 256), 'feature_fraction': trial.suggest_float('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_float('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100)}
gbm = lgb.train(param, dtrain)
preds = gbm.predict(X_test)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(y_test, pred_labels)
return accuracy
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=100)
|
code
|
50221349/cell_7
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import lightgbm as lgb
import optuna
from sklearn.model_selection import train_test_split
import sklearn
|
code
|
50221349/cell_10
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import numpy as np # linear algebra
import optuna
import optuna
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn
train = pd.read_csv('/kaggle/input/jane-street-market-prediction/train.csv', nrows=30000)
features = [col for col in list(train.columns) if 'feature' in col]
train = train[train['weight'] != 0]
train['action'] = (train['resp'].values > 0).astype(int)
f_mean = train.mean()
train = train.fillna(f_mean)
X = train.loc[:, features]
y = train.loc[:, 'action']
del train
X = np.array(X)
y = np.array(y)
import optuna
def objective(trial):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
dtrain = lgb.Dataset(X_train, label=y_train)
param = {'objective': 'binary', 'metric': 'binary_logloss', 'verbosity': -1, 'boosting_type': 'gbdt', 'lambda_l1': trial.suggest_float('lambda_l1', 1e-08, 10.0, log=True), 'lambda_l2': trial.suggest_float('lambda_l2', 1e-08, 10.0, log=True), 'num_leaves': trial.suggest_int('num_leaves', 2, 256), 'feature_fraction': trial.suggest_float('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_float('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100)}
gbm = lgb.train(param, dtrain)
preds = gbm.predict(X_test)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(y_test, pred_labels)
return accuracy
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=100)
print('Number of finished trials: {}'.format(len(study.trials)))
print('Best trial:')
trial = study.best_trial
print(' Value: {}'.format(trial.value))
print(' Params: ')
for key, value in trial.params.items():
print(' {}: {}'.format(key, value))
|
code
|
18159225/cell_13
|
[
"text_html_output_1.png"
] |
from collections import Counter
from nltk.corpus import stopwords
import pandas as pd
import string
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.drop(['text_lower'], axis=1, inplace=True)
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
"""custom function to remove the punctuation"""
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['text_wo_punct'] = df['text'].apply(lambda text: remove_punctuation(text))
from nltk.corpus import stopwords
', '.join(stopwords.words('english'))
STOPWORDS = set(stopwords.words('english'))
def remove_stopwords(text):
"""custom function to remove the stopwords"""
return ' '.join([word for word in str(text).split() if word not in STOPWORDS])
df['text_wo_stop'] = df['text_wo_punct'].apply(lambda text: remove_stopwords(text))
from collections import Counter
cnt = Counter()
for text in df['text_wo_stop'].values:
for word in text.split():
cnt[word] += 1
cnt.most_common(10)
FREQWORDS = set([w for w, wc in cnt.most_common(10)])
def remove_freqwords(text):
"""custom function to remove the frequent words"""
return ' '.join([word for word in str(text).split() if word not in FREQWORDS])
df['text_wo_stopfreq'] = df['text_wo_stop'].apply(lambda text: remove_freqwords(text))
df.head()
|
code
|
18159225/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.head()
|
code
|
18159225/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
import string
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.drop(['text_lower'], axis=1, inplace=True)
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
"""custom function to remove the punctuation"""
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['text_wo_punct'] = df['text'].apply(lambda text: remove_punctuation(text))
df.head()
|
code
|
18159225/cell_2
|
[
"text_html_output_1.png"
] |
import pandas as pd
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
full_df.head()
|
code
|
18159225/cell_19
|
[
"text_plain_output_1.png"
] |
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.snowball import SnowballStemmer
SnowballStemmer.languages
|
code
|
18159225/cell_8
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.corpus import stopwords
', '.join(stopwords.words('english'))
|
code
|
18159225/cell_15
|
[
"text_plain_output_1.png"
] |
from collections import Counter
from nltk.corpus import stopwords
import pandas as pd
import string
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.drop(['text_lower'], axis=1, inplace=True)
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
"""custom function to remove the punctuation"""
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['text_wo_punct'] = df['text'].apply(lambda text: remove_punctuation(text))
from nltk.corpus import stopwords
', '.join(stopwords.words('english'))
STOPWORDS = set(stopwords.words('english'))
def remove_stopwords(text):
"""custom function to remove the stopwords"""
return ' '.join([word for word in str(text).split() if word not in STOPWORDS])
df['text_wo_stop'] = df['text_wo_punct'].apply(lambda text: remove_stopwords(text))
from collections import Counter
cnt = Counter()
for text in df['text_wo_stop'].values:
for word in text.split():
cnt[word] += 1
cnt.most_common(10)
FREQWORDS = set([w for w, wc in cnt.most_common(10)])
def remove_freqwords(text):
"""custom function to remove the frequent words"""
return ' '.join([word for word in str(text).split() if word not in FREQWORDS])
df['text_wo_stopfreq'] = df['text_wo_stop'].apply(lambda text: remove_freqwords(text))
df.drop(['text_wo_punct', 'text_wo_stop'], axis=1, inplace=True)
n_rare_words = 10
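# most_common()[:-n_rare_words - 1:-1] walks the frequency list from the tail, i.e. the 10 least common words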
RAREWORDS = set([w for w, wc in cnt.most_common()[:-n_rare_words - 1:-1]])
def remove_rarewords(text):
"""custom function to remove the rare words"""
return ' '.join([word for word in str(text).split() if word not in RAREWORDS])
df['text_wo_stopfreqrare'] = df['text_wo_stopfreq'].apply(lambda text: remove_rarewords(text))
df.head()
|
code
|
18159225/cell_17
|
[
"text_html_output_1.png"
] |
from collections import Counter
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import pandas as pd
import string
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.drop(['text_lower'], axis=1, inplace=True)
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
"""custom function to remove the punctuation"""
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['text_wo_punct'] = df['text'].apply(lambda text: remove_punctuation(text))
from nltk.corpus import stopwords
', '.join(stopwords.words('english'))
STOPWORDS = set(stopwords.words('english'))
def remove_stopwords(text):
"""custom function to remove the stopwords"""
return ' '.join([word for word in str(text).split() if word not in STOPWORDS])
df['text_wo_stop'] = df['text_wo_punct'].apply(lambda text: remove_stopwords(text))
from collections import Counter
cnt = Counter()
for text in df['text_wo_stop'].values:
for word in text.split():
cnt[word] += 1
cnt.most_common(10)
FREQWORDS = set([w for w, wc in cnt.most_common(10)])
def remove_freqwords(text):
"""custom function to remove the frequent words"""
return ' '.join([word for word in str(text).split() if word not in FREQWORDS])
df['text_wo_stopfreq'] = df['text_wo_stop'].apply(lambda text: remove_freqwords(text))
df.drop(['text_wo_punct', 'text_wo_stop'], axis=1, inplace=True)
n_rare_words = 10
RAREWORDS = set([w for w, wc in cnt.most_common()[:-n_rare_words - 1:-1]])
def remove_rarewords(text):
"""custom function to remove the rare words"""
return ' '.join([word for word in str(text).split() if word not in RAREWORDS])
df['text_wo_stopfreqrare'] = df['text_wo_stopfreq'].apply(lambda text: remove_rarewords(text))
from nltk.stem.porter import PorterStemmer
df.drop(['text_wo_stopfreq', 'text_wo_stopfreqrare'], axis=1, inplace=True)
stemmer = PorterStemmer()
def stem_words(text):
return ' '.join([stemmer.stem(word) for word in text.split()])
df['text_stemmed'] = df['text'].apply(lambda text: stem_words(text))
df.head()
|
code
|
18159225/cell_10
|
[
"text_html_output_1.png"
] |
from nltk.corpus import stopwords
import pandas as pd
import string
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.drop(['text_lower'], axis=1, inplace=True)
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
"""custom function to remove the punctuation"""
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['text_wo_punct'] = df['text'].apply(lambda text: remove_punctuation(text))
from nltk.corpus import stopwords
', '.join(stopwords.words('english'))
STOPWORDS = set(stopwords.words('english'))
def remove_stopwords(text):
"""custom function to remove the stopwords"""
return ' '.join([word for word in str(text).split() if word not in STOPWORDS])
df['text_wo_stop'] = df['text_wo_punct'].apply(lambda text: remove_stopwords(text))
df.head()
|
code
|
18159225/cell_12
|
[
"text_html_output_1.png"
] |
from collections import Counter
from nltk.corpus import stopwords
import pandas as pd
import string
import numpy as np
import pandas as pd
import re
import nltk
import spacy
import string
pd.options.mode.chained_assignment = None
full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text'] = df['text'].astype(str)
df['text_lower'] = df['text'].str.lower()
df.drop(['text_lower'], axis=1, inplace=True)
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
"""custom function to remove the punctuation"""
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['text_wo_punct'] = df['text'].apply(lambda text: remove_punctuation(text))
from nltk.corpus import stopwords
', '.join(stopwords.words('english'))
STOPWORDS = set(stopwords.words('english'))
def remove_stopwords(text):
"""custom function to remove the stopwords"""
return ' '.join([word for word in str(text).split() if word not in STOPWORDS])
df['text_wo_stop'] = df['text_wo_punct'].apply(lambda text: remove_stopwords(text))
from collections import Counter
cnt = Counter()
for text in df['text_wo_stop'].values:
for word in text.split():
cnt[word] += 1
cnt.most_common(10)
|
code
|
129008050/cell_9
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
cor = data.corr()
data = data.drop(['cylinders', 'displacement'], axis=1)
data
cor = data.corr()
x_train = data.iloc[:, 1:]
y_train = data.iloc[:, 0:1]
x_train
|
code
|
129008050/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
df_c = data['car name'].astype('category')
data['car name'] = df_c.cat.codes
data['car name']
|
code
|
129008050/cell_6
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
plt.figure(figsize=(12, 10))
cor = data.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()
|
code
|
129008050/cell_2
|
[
"image_output_1.png"
] |
import pandas as pd
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
|
code
|
129008050/cell_1
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.decomposition import PCA
from sklearn import linear_model
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import seaborn as sns
|
code
|
129008050/cell_7
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
cor = data.corr()
data = data.drop(['cylinders', 'displacement'], axis=1)
data
|
code
|
129008050/cell_8
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
cor = data.corr()
data = data.drop(['cylinders', 'displacement'], axis=1)
data
plt.figure(figsize=(12, 10))
cor = data.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()
|
code
|
129008050/cell_15
|
[
"text_html_output_1.png"
] |
from sklearn import linear_model
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
cor = data.corr()
data = data.drop(['cylinders', 'displacement'], axis=1)
data
cor = data.corr()
x_train = data.iloc[:, 1:]
y_train = data.iloc[:, 0:1]
x_train
x_train.shape
# NOTE: the notebook cell that split the data is not part of this record; a plausible
# train/validation/test split is assumed below so the remaining lines can run.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=0)
model = linear_model.LinearRegression()
model.fit(x_train, y_train)
y_predict_val = model.predict(x_val)
y_predict_test = model.predict(x_test)
testing_rts = r2_score(y_test, y_predict_test)  # r2_score expects (y_true, y_pred)
testing_rts
|
code
|
129008050/cell_14
|
[
"text_plain_output_1.png"
] |
from sklearn import linear_model
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
cor = data.corr()
data = data.drop(['cylinders', 'displacement'], axis=1)
data
cor = data.corr()
x_train = data.iloc[:, 1:]
y_train = data.iloc[:, 0:1]
x_train
x_train.shape
# NOTE: the notebook cell that split the data is not part of this record; a plausible
# train/validation split is assumed below so the remaining lines can run.
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=0)
model = linear_model.LinearRegression()
model.fit(x_train, y_train)
y_predict_val = model.predict(x_val)
training_rts = r2_score(y_val, y_predict_val)  # r2_score expects (y_true, y_pred)
training_rts
|
code
|
129008050/cell_12
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
cor = data.corr()
data = data.drop(['cylinders', 'displacement'], axis=1)
data
cor = data.corr()
x_train = data.iloc[:, 1:]
y_train = data.iloc[:, 0:1]
x_train
x_train.shape
|
code
|
129008050/cell_5
|
[
"image_output_1.png"
] |
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/autompg-dataset/auto-mpg.csv', na_values='?', comment='\t', skipinitialspace=True)
data
data = data.replace(np.nan, 0)
data
|
code
|
73060893/cell_2
|
[
"text_plain_output_1.png"
] |
import warnings
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
import transformers
from transformers import AdamW, T5Tokenizer, T5ForConditionalGeneration
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torch_xla.distributed.xla_multiprocessing as xmp
import warnings
warnings.filterwarnings('ignore')
|
code
|
73060893/cell_1
|
[
"text_plain_output_1.png"
] |
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --version nightly --apt-packages libomp5 libopenblas-dev
|
code
|
73060893/cell_3
|
[
"application_vnd.jupyter.stderr_output_9.png",
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"application_vnd.jupyter.stderr_output_6.png",
"application_vnd.jupyter.stderr_output_8.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_1.png"
] |
import transformers
class config:
MAX_LEN_I = 448
MAX_LEN_O = 224
TRAIN_BATCH_SIZE = 16
VALID_BATCH_SIZE = 8
EPOCHS = 15
MODEL_PATH = 'T5-base-TPU.pth'
TRAINING_FILE = '../input/table-to-text-generation-dataset-google-totto/totto_data/tablesWithTag.csv'
TOKENIZER = transformers.T5Tokenizer.from_pretrained('t5-base', do_lower_case=True)
|
code
|
73060893/cell_10
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from transformers import AdamW, T5Tokenizer, T5ForConditionalGeneration
import pandas as pd
import time
import torch
import torch_xla.core.xla_model as xm
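# Register the ToTTo table-markup tags as special tokens so the tokenizer keeps each one as a single id
# (assumes the `config` class defined in an earlier cell of this notebook is in scope).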
special_tokens_dict = {'pad_token': '<pad>', 'bos_token': '<bos>', 'eos_token': '<eos>', 'additional_special_tokens': ['<PAGESTART>', '<PAGEEND>', '<SECTIONSTART>', '<SECTIONEND>', '<TABLESTART>', '<TABLEEND>', '<CELLSTART>', '<CELLEND>', '<COLHEADERSTART>', '<COLHEADEREND>', '<ROWHEADERSTART>', '<ROWHEADEREND>']}
num_added_toks = config.TOKENIZER.add_special_tokens(special_tokens_dict)
df = pd.read_csv(config.TRAINING_FILE)
train_df, val_df = train_test_split(df, test_size=0.1)
train_df = train_df.reset_index(drop=True)
val_df = val_df.reset_index(drop=True)
class tottoDataset(Dataset):
def __init__(self, df, tokenizer):
self.sentence = df['sentence']
self.table = df['table']
self.tokenizer = tokenizer
def __len__(self):
return len(self.sentence)
def __getitem__(self, idx):
inp = (self.table[idx] + '</s>').replace('<page_title>', '<PAGESTART>').replace('</page_title>', '<PAGEEND>').replace('<section_title>', '<SECTIONSTART>').replace('</section_title>', '<SECTIONEND>').replace('<table>', '<TABLESTART>').replace('</table>', '<TABLEEND>').replace('<cell>', '<CELLSTART>').replace('</cell>', '<CELLEND>').replace('<col_header>', '<COLHEADERSTART>').replace('</col_header>', '<COLHEADEREND>').replace('<row_header>', '<ROWHEADERSTART>').replace('</row_header>', '<ROWHEADEREND>')
out = self.sentence[idx] + '</s>'
inp_tokens = self.tokenizer.encode_plus(inp, padding='max_length', max_length=config.MAX_LEN_I, truncation=True)
out_tokens = self.tokenizer.encode_plus(out, padding='max_length', max_length=config.MAX_LEN_O, truncation=True)
inp_id = inp_tokens.input_ids
out_id = out_tokens.input_ids
inp_mask = inp_tokens.attention_mask
out_mask = out_tokens.attention_mask
labels = out_tokens.input_ids.copy()
labels = [-100 if x == self.tokenizer.pad_token_id else x for x in labels]
return {'table_text': inp, 'sentence': out, 'input_ids': torch.tensor(inp_id, dtype=torch.long), 'input_attention_mask': torch.tensor(inp_mask, dtype=torch.long), 'decoder_input_ids': torch.tensor(out_id, dtype=torch.long), 'decoder_attention_mask': torch.tensor(out_mask, dtype=torch.long), 'labels': torch.tensor(labels, dtype=torch.long)}
def train_fn(dataloader, model, optimizer, device, scheduler, epoch, num_epoch, num_steps):
model.train()
for i, batch in enumerate(dataloader):
input_ids = batch['input_ids'].to(device)
labels = batch['labels'].to(device)
outputs = model(input_ids=input_ids, labels=labels)
loss = outputs.loss
loss.backward()
xm.optimizer_step(optimizer)
if scheduler is not None:
scheduler.step()
def eval_fn(dataloader, model, device, size):
model.eval()
loss = 0
with torch.no_grad():
for i, batch in enumerate(dataloader):
input_ids = batch['input_ids'].to(device)
labels = batch['labels'].to(device)
outputs = model(input_ids=input_ids, labels=labels)
loss += outputs.loss.item()
return loss
model = T5ForConditionalGeneration.from_pretrained('t5-base', return_dict=True)
# Resize the shared embedding (and the tied lm_head) once at the model level after adding tokens,
# rather than resizing encoder and decoder separately.
model.resize_token_embeddings(len(config.TOKENIZER))
|
code
|
105187200/cell_25
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
data['weekday'] = pd.to_datetime(data['action_date']).dt.day_name()  # weekday column, derived as in the notebook's other cells
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
data.groupby(['weekday'])['daily_conversion_rate '].sum().reindex(cats).plot(figsize=(16, 6), subplots=True)
|
code
|
105187200/cell_23
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
data['week_number'] = pd.to_datetime(data['action_date']).dt.strftime('%U')  # week-number column, derived as in the notebook's other cells
data.groupby(['week_number'])['daily_conversion_rate '].sum().plot(figsize=(16, 6), subplots=True)
|
code
|
105187200/cell_30
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
data['week_number'] = pd.to_datetime(data['action_date']).dt.strftime('%U')  # week-number column, derived as in the notebook's other cells
data.groupby(['week_number'])['conversion_rate_to_unlockpage '].sum().plot(figsize=(16, 6), subplots=True)
|
code
|
105187200/cell_20
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
fig, ax = plt.subplots(figsize=(13, 6))
ax.bar(data['action_date'], data['daily_conversion_rate '])
ax.set(xlabel='Date', ylabel='Daily Conversion rate(%)', title='Daily Conversion Rate to trial')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
|
code
|
105187200/cell_39
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['daily_conversion_rate ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Daily Conversion rate(%)",
title="Daily Conversion Rate to trial")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['conversion_rate_to_unlockpage ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Unlock Page Conversion rate(%)",
title="Daily Unlock Page Conversion")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
fig, ax = plt.subplots(figsize=(13, 6))
ax.set(xlabel='conversion_rate_to_unlockpage(%)', ylabel='daily_conversion_rate', title='Relationship between unlock page view rate to trial rate')
sns.scatterplot(data['conversion_rate_to_unlockpage '], data['daily_conversion_rate '])
|
code
|
105187200/cell_48
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
data['week_number'] = pd.to_datetime(data['action_date']).dt.strftime('%U')
data['weekday'] = pd.to_datetime(data['action_date']).dt.day_name()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
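# Pearson correlation (pandas default) between the daily trial conversion rate and the unlock-page conversion rate.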
pd.Series(data['daily_conversion_rate ']).corr(pd.Series(data['conversion_rate_to_unlockpage ']))
data_2 = pd.read_csv('/kaggle/input/brainly-2/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.2.csv')
data_2
|
code
|
105187200/cell_41
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['daily_conversion_rate ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Daily Conversion rate(%)",
title="Daily Conversion Rate to trial")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['conversion_rate_to_unlockpage ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Unlock Page Conversion rate(%)",
title="Daily Unlock Page Conversion")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
fig, ax = plt.subplots(figsize=(13,6))
# Set title and labels for axes
ax.set(xlabel="conversion_rate_to_unlockpage(%)",
ylabel="daily_conversion_rate",
title="Relationship between unlock page view rate to trial rate")
sns.scatterplot( data["conversion_rate_to_unlockpage "], data["daily_conversion_rate "]);
fig, ax = plt.subplots(figsize=(13, 6))
ax.set(xlabel='unlock_users', ylabel='trial_users', title='Relationship between trial users and unlock users')
sns.scatterplot(data['unlock_users'], data['trial_users'])
|
code
|
105187200/cell_50
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['daily_conversion_rate ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Daily Conversion rate(%)",
title="Daily Conversion Rate to trial")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
data['week_number'] = pd.to_datetime(data['action_date']).dt.strftime('%U')
data['weekday'] = pd.to_datetime(data['action_date']).dt.day_name()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['conversion_rate_to_unlockpage ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Unlock Page Conversion rate(%)",
title="Daily Unlock Page Conversion")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
pd.Series(data['daily_conversion_rate ']).corr(pd.Series(data['conversion_rate_to_unlockpage ']))
fig, ax = plt.subplots(figsize=(13,6))
# Set title and labels for axes
ax.set(xlabel="conversion_rate_to_unlockpage(%)",
ylabel="daily_conversion_rate",
title="Relationship between unlock page view rate to trial rate")
sns.scatterplot( data["conversion_rate_to_unlockpage "], data["daily_conversion_rate "]);
fig, ax = plt.subplots(figsize=(13,6))
# Set title and labels for axes
ax.set(xlabel="unlock_users",
ylabel="trial_users",
title="Relationship between trial users and unlock users")
sns.scatterplot(data["unlock_users"], data["trial_users"] );
data_2 = pd.read_csv('/kaggle/input/brainly-2/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.2.csv')
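# Percentage of users in each group (converted to trial or not) who added at least one answer.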
data_2['answer_added (%)'] = data_2['users_at_least_1_answer_added'] / data_2['users_all'] * 100
fig, ax = plt.subplots(figsize=(13, 6))
ax.set(title='Behavior of converted and non converted users in answer_added')
sns.barplot(x=data_2['user_converted_to_trial'], y=data_2['answer_added (%)'])
|
code
|
105187200/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
105187200/cell_18
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
data.head()
|
code
|
105187200/cell_32
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
data['weekday'] = pd.to_datetime(data['action_date']).dt.day_name()  # weekday column, derived as in the notebook's other cells
data.groupby(['weekday'])['conversion_rate_to_unlockpage '].sum().reindex(cats).plot(figsize=(16, 6), subplots=True)
|
code
|
105187200/cell_51
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['daily_conversion_rate ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Daily Conversion rate(%)",
title="Daily Conversion Rate to trial")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
data['week_number'] = pd.to_datetime(data['action_date']).dt.strftime('%U')
data['weekday'] = pd.to_datetime(data['action_date']).dt.day_name()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['conversion_rate_to_unlockpage ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Unlock Page Conversion rate(%)",
title="Daily Unlock Page Conversion")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
pd.Series(data['daily_conversion_rate ']).corr(pd.Series(data['conversion_rate_to_unlockpage ']))
fig, ax = plt.subplots(figsize=(13,6))
# Set title and labels for axes
ax.set(xlabel="conversion_rate_to_unlockpage(%)",
ylabel="daily_conversion_rate",
title="Relationship between unlock page view rate to trial rate")
sns.scatterplot( data["conversion_rate_to_unlockpage "], data["daily_conversion_rate "]);
fig, ax = plt.subplots(figsize=(13,6))
# Set title and labels for axes
ax.set(xlabel="unlock_users",
ylabel="trial_users",
title="Relationship between trial users and unlock users")
sns.scatterplot(data["unlock_users"], data["trial_users"] );
data_2 = pd.read_csv('/kaggle/input/brainly-2/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.2.csv')
data_2["answer_added (%)"]=data_2["users_at_least_1_answer_added"]/data_2["users_all"]*100
fig, ax = plt.subplots(figsize=(13,6))
# Set title and labels for axes
ax.set(title="Behavior of converted and non converted users in answer_added")
sns.barplot(x=data_2["user_converted_to_trial"],y=data_2["answer_added (%)"] )
data_2['question_added (%)'] = data_2['users_at_least_1_question_added'] / data_2['users_all'] * 100
fig, ax = plt.subplots(figsize=(13, 6))
ax.set(title='Behavior of converted and non converted users in questions_added')
sns.barplot(x=data_2['user_converted_to_trial'], y=data_2['question_added (%)'])
|
code
|
105187200/cell_28
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
fig, ax = plt.subplots(figsize=(13,6))
# Add x-axis and y-axis
ax.bar(data["action_date"],
data['daily_conversion_rate ']
)
# Set title and labels for axes
ax.set(xlabel="Date",
ylabel="Daily Conversion rate(%)",
title="Daily Conversion Rate to trial")
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
fig, ax = plt.subplots(figsize=(13, 6))
ax.bar(data['action_date'], data['conversion_rate_to_unlockpage '])
ax.set(xlabel='Date', ylabel='Unlock Page Conversion rate(%)', title='Daily Unlock Page Conversion')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.show()
|
code
|
105187200/cell_8
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
|
code
|
105187200/cell_15
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.head()
|
code
|
105187200/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.head()
|
code
|
105187200/cell_36
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/brainly/IC2 Data Analyst Taske home task - dataset for Part 2 - 2.1.csv')
data.shape
data.sort_values(by=['action_date'], inplace=True)
data['week_number'] = pd.to_datetime(data['action_date']).dt.strftime('%U')
data['weekday'] = pd.to_datetime(data['action_date']).dt.day_name()
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
pd.Series(data['daily_conversion_rate ']).corr(pd.Series(data['conversion_rate_to_unlockpage ']))
|
code
|
33112913/cell_4
|
[
"text_plain_output_1.png"
] |
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
np.sum(data.iloc[:, 1:94] > 40)
|
code
|
33112913/cell_6
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import StandardScaler
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
from sklearn.preprocessing import StandardScaler
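# Standardise each feature to zero mean and unit variance before fitting the distance-based k-NN classifier.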
X_standard = StandardScaler().fit_transform(X)
X_standard
|
code
|
33112913/cell_2
|
[
"text_plain_output_1.png"
] |
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
print(X.shape)
Y = np.asarray(data.iloc[:, -1])
print(Y, Y.shape, len(np.unique(Y)))
|
code
|
33112913/cell_1
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
|
code
|
33112913/cell_7
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
from sklearn.preprocessing import StandardScaler
X_standard = StandardScaler().fit_transform(X)
X_standard
from sklearn.model_selection import cross_validate
knn = KNeighborsClassifier(n_neighbors=9)
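# Five-fold cross-validation of the k=9 nearest-neighbour classifier on the standardised features.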
cv_results = cross_validate(knn, X_standard, Y, cv=5)
cv_results['test_score']
|
code
|
33112913/cell_3
|
[
"text_html_output_1.png"
] |
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
data.describe()
|
code
|
33112913/cell_5
|
[
"text_plain_output_1.png"
] |
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import pandas as pd
import os
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import os
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
np.sum(data.iloc[:, 1:94] > 40)
data.isnull().sum()
|
code
|
50227590/cell_7
|
[
"text_plain_output_1.png"
] |
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
digits = datasets.load_digits()
breast = datasets.load_breast_cancer()
X_digits = digits.data
y_digits = digits.target
X_breast = breast.data
y_breast = breast.target
Bern_clf = BernoulliNB()
Mult_clf = MultinomialNB()
Gauss_clf = GaussianNB()
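# Score each naive Bayes variant with cross-validation on both the digits and breast-cancer datasets, then compare mean accuracies.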
Bern_val_score_digits = cross_val_score(Bern_clf, X_digits, y_digits).mean()
Mult_val_score_digits = cross_val_score(Mult_clf, X_digits, y_digits).mean()
Gauss_val_score_digits = cross_val_score(Gauss_clf, X_digits, y_digits).mean()
Bern_val_score_breast = cross_val_score(Bern_clf, X_breast, y_breast).mean()
Mult_val_score_breast = cross_val_score(Mult_clf, X_breast, y_breast).mean()
Gauss_val_score_breast = cross_val_score(Gauss_clf, X_breast, y_breast).mean()
print('BernoulliNB, digits = ', Bern_val_score_digits)
print('MultinomialNB, digits = ', Mult_val_score_digits)
print('GaussianNB, digits = ', Gauss_val_score_digits)
print('')
print('BernoulliNB, breast = ', Bern_val_score_breast)
print('MultinomialNB, breast = ', Mult_val_score_breast)
print('GaussianNB, breast = ', Gauss_val_score_breast)
|
code
|
73079642/cell_21
|
[
"text_plain_output_1.png"
] |
from keras.layers import Input,Conv2D,MaxPool2D, UpSampling2D,Dense, Dropout
from keras.models import Model
from keras.models import load_model
from tensorflow.keras import losses
import matplotlib.pyplot as plt
import numpy as np
def noise(a1, a2, channel):
"""
Adds random noise to each image in the supplied array.
"""
if channel == 1:
noise_factor = 0.2
noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
else:
noi = 0.1
noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
ab1 = np.clip(noisy_arr1, 0, 1)
ab2 = np.clip(noisy_arr2, 0, 1)
return (ab1, ab2)
# Visualization for mnist, cifar10, noisy, denoised/predictions data
def display(rows, cols, a, b, check=False ):
'''rows: number of rows in the figure
cols: number of columns in the figure
a: clean training images, or the noisy images when displaying test predictions
b: noisy training images, or the denoised images when displaying test predictions
check: default False for 32*32 cifar10 images, True for 28*28 mnist images and any predictions
'''
# defining a figure
f = plt.figure(figsize=(2*cols,2*rows*2))
for i in range(rows):
for j in range(cols):
# adding subplot to figure on each iteration
f.add_subplot(rows*2,cols, (2*i*cols)+(j+1))
if check:
plt.imshow(a[i*cols + j].reshape([28,28]),cmap="Blues")
else:
plt.imshow(a[i*cols + j])
plt.axis("off")
for j in range(cols):
# adding subplot to figure on each iteration
f.add_subplot(rows*2,cols,((2*i+1)*cols)+(j+1))
if check:
plt.imshow(b[i*cols + j].reshape([28,28]),cmap="Blues")
else:
plt.imshow(b[i*cols + j])
plt.axis("off")
plt.axis("off")
#f.suptitle("Sample Training Data",fontsize=18)
plt.savefig("ss.png")
plt.show()
inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
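# Bottleneck: two 2x2 poolings compress the 28x28 input to a 7x7x32 feature map.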
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)
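# Sigmoid keeps the reconstructed pixel values in [0, 1], matching the binary cross-entropy loss used below.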
from tensorflow.keras import losses
autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
# NOTE: the data-preparation cell is not part of this record; a plausible MNIST setup
# (normalised images plus the noise() helper defined above) is assumed so fit() has defined inputs.
from keras.datasets import mnist
(train_raw, _), (test_raw, _) = mnist.load_data()
train_data = train_raw.astype('float32')[..., None] / 255.0
test_data = test_raw.astype('float32')[..., None] / 255.0
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel=1)
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))
autoencoder1.save('autoencoder_model1.h5')
from keras.models import load_model
model1 = load_model('autoencoder_model1.h5')
num_imgs = 45
rand = np.random.randint(1, 100)
test_images = noisy_test_data[rand:rand + num_imgs]
test_denoised = model1.predict(test_images)
display(2, 6, test_images, test_denoised, check=True)
|
code
|