Dataset columns:
path — string (13 to 17 characters)
screenshot_names — sequence (1 to 873 items)
code — string (0 to 40.4k characters)
cell_type — string (1 distinct value: "code")
105206399/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()

def check(df):
    l = []
    columns = df.columns
    for col in columns:
        dtypes = df[col].dtypes
        nunique = df[col].nunique()
        sum_null = df[col].isnull().sum()
        l.append([col, dtypes, nunique, sum_null])
    df_check = pd.DataFrame(l)
    df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
    return df_check

check(df_cars)
code
105206399/cell_45
[ "text_html_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
code
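Note for readers of the cell above: the x_train, x_test, y_train and y_test arrays are used but never created in the cells captured here, and the scaler is re-fit on the test split. A minimal sketch of the presumed preparation step, assuming the x and y arrays built from the cars frame elsewhere in this notebook, using the more conventional fit-on-train / transform-on-test pattern (the split parameters are assumptions, not taken from the notebook):

# Hypothetical sketch: the split itself is not shown in the extracted cells.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)  # fit the scaler on training data only
x_test = ro_scaler.transform(x_test)        # reuse the training statistics on the test split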
105206399/cell_49
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_
rid.coef_
code
105206399/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
code
105206399/cell_59
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)

lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train)
lass.score(x_train, y_train)
lass.score(x_test, y_test)
lass.intercept_
code
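The Ridge and Lasso cells above hard-code alpha=0.9 and alpha=0.6. A brief optional alternative, not part of the original notebook, that lets scikit-learn pick the penalty strength by cross-validation on the same scaled training data (the alpha grid below is an arbitrary assumption):

# Illustrative only: alpha grid chosen arbitrarily, not taken from the notebook.
import numpy as np
from sklearn.linear_model import LassoCV, RidgeCV

alphas = np.logspace(-3, 2, 30)
rid_cv = RidgeCV(alphas=alphas).fit(x_train, y_train)
lass_cv = LassoCV(alphas=alphas, max_iter=10000).fit(x_train, y_train)
print(rid_cv.alpha_, lass_cv.alpha_)  # cross-validated penalty strengths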
105206399/cell_58
[ "image_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)

lass = linear_model.Lasso(alpha=0.6)
lass.fit(x_train, y_train)
lass.score(x_train, y_train)
lass.score(x_test, y_test)
code
105206399/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.head()
code
105206399/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.hist(bins=40, figsize=(20, 15), color='b')
code
105206399/cell_38
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
code
105206399/cell_47
[ "image_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
code
105206399/cell_35
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
code
105206399/cell_43
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()

def check(df):
    l = []
    columns = df.columns
    for col in columns:
        dtypes = df[col].dtypes
        nunique = df[col].nunique()
        sum_null = df[col].isnull().sum()
        l.append([col, dtypes, nunique, sum_null])
    df_check = pd.DataFrame(l)
    df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
    return df_check

check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coeficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})
plt.figure(figsize=(10, 8))
plt.plot(df_1[:50])
plt.legend(['Actualy', 'predicted'])
code
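The feature list in the cell above includes 'drivewheel' and 'fuelsystem', which are object (string) columns; a plain LinearRegression (and RobustScaler) cannot be fit on them unless they are encoded in cells not captured here. A minimal sketch, assuming one-hot encoding is the intended treatment:

# Assumption: the original notebook encodes these columns in a cell not shown above.
import pandas as pd

cars_encoded = pd.get_dummies(cars, columns=['drivewheel', 'fuelsystem'], drop_first=True)
x = cars_encoded.drop('price', axis=1).values
y = cars_encoded['price'].values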
105206399/cell_46
[ "text_html_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
code
105206399/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
code
105206399/cell_22
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
for i in categorical_cols:
    print(i, 'unique values')
    print('-----------------------------------')
    print(df_cars[i].unique())
    print('-----------------------------------------------------------------------------')
code
105206399/cell_53
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()

def check(df):
    l = []
    columns = df.columns
    for col in columns:
        dtypes = df[col].dtypes
        nunique = df[col].nunique()
        sum_null = df[col].isnull().sum()
        l.append([col, dtypes, nunique, sum_null])
    df_check = pd.DataFrame(l)
    df_check.columns = ['column', 'dtypes', 'nunique', 'sum_null']
    return df_check

check(df_cars)
df_cars.isnull().sum()
df_cars.duplicated().sum()
df_cars.columns
df_cars.drop('car_ID', axis=1, inplace=True)
df_cars.CarName.unique()
df_cars.CarName.unique()
categorical_cols = df_cars.select_dtypes(include=['object']).columns
c = df_cars.corr()
cars = df_cars[['wheelbase', 'carlength', 'carwidth', 'curbweight', 'enginesize', 'boreratio', 'horsepower', 'citympg', 'highwaympg', 'drivewheel', 'fuelsystem', 'price']]
x = cars.drop('price', axis=1).values
y = cars['price'].values

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
reg.coef_
pd.DataFrame(reg.coef_, cars.columns[:-1], columns=['coeficients'])
y_pred_1 = reg.predict(x_test)
df_1 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_1})

rid = linear_model.Ridge(alpha=0.9)
rid.fit(x_train, y_train)
rid.score(x_train, y_train)
rid.score(x_test, y_test)
rid.intercept_
rid.coef_
pd.DataFrame(rid.coef_, cars.columns[:-1], columns=['coeficients'])
y_pred_2 = rid.predict(x_test)
df_2 = pd.DataFrame({'y_test': y_test, 'Y_pred': y_pred_2})
df_2.head(10)
code
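df_1 and df_2 above only tabulate predictions; a short optional sketch, not in the original cells, for scoring both models with the usual regression metrics on the same test split:

# Optional addition: quantitative comparison of the two prediction sets.
from sklearn.metrics import mean_absolute_error, r2_score

for name, pred in [('linear', y_pred_1), ('ridge', y_pred_2)]:
    print(name, mean_absolute_error(y_test, pred), r2_score(y_test, pred))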
105206399/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
code
105206399/cell_37
[ "text_plain_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
reg.intercept_
code
105206399/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df_cars = data.copy()
df_cars.isnull().sum()
df_cars.duplicated().sum()
code
105206399/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
from sklearn.preprocessing import RobustScaler

ro_scaler = RobustScaler()
x_train = ro_scaler.fit_transform(x_train)
x_test = ro_scaler.fit_transform(x_test)

reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
reg.score(x_train, y_train)
reg.score(x_test, y_test)
code
33108902/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33108902/cell_15
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

# Fit the forest on the training split and report accuracy on the held-out split.
final_model = RandomForestClassifier(max_leaf_nodes=7, random_state=0)
final_model.fit(x_treino, y_treino)
accuracy = final_model.score(x_teste, y_teste)
print(accuracy)
code
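x_treino, y_treino, x_teste and y_teste are used above but never constructed in the cells captured here. A hedged guess at the missing step, assuming the Titanic frames loaded earlier and 'Survived' as the target; the feature list below is purely illustrative:

# Hypothetical reconstruction of the missing split; feature choice is an assumption.
from sklearn.model_selection import train_test_split

features = ['Pclass', 'SibSp', 'Parch', 'Fare']  # assumed numeric columns
X = arquivos_de_treino[features]
y = arquivos_de_treino['Survived']
x_treino, x_teste, y_treino, y_teste = train_test_split(X, y, random_state=0)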
33108902/cell_14
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(train_X, train_y)
    preds_val = model.predict(val_X)
    mae = mean_absolute_error(val_y, preds_val)
    return mae

candidate_max_leaf_nodes = [5, 25, 30, 40, 50, 100, 250, 500]
best_value = 0
controle = 0
for max_leaf_nodes in candidate_max_leaf_nodes:
    my_mae = get_mae(max_leaf_nodes, x_treino, x_teste, y_treino, y_teste)
    if best_value == 0:
        controle = my_mae
        best_value = max_leaf_nodes
    elif controle > my_mae:
        controle = my_mae
        best_value = max_leaf_nodes
best_tree_size = best_value
best_tree_size
code
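The selection loop above tracks the best MAE by hand; an equivalent, more compact form uses min() with a key function and picks the same tree size (the first value with the lowest validation MAE):

# Equivalent one-liner for picking the tree size with the lowest validation MAE.
best_tree_size = min(candidate_max_leaf_nodes,
                     key=lambda n: get_mae(n, x_treino, x_teste, y_treino, y_teste))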
33108902/cell_5
[ "text_plain_output_1.png" ]
import numpy as np   # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

arquivos_de_treino = pd.read_csv('/kaggle/input/titanic/train.csv')
arquivos_de_teste = pd.read_csv('/kaggle/input/titanic/test.csv')
arquivos_de_treino = arquivos_de_treino.replace(np.nan, 0)
arquivos_de_teste = arquivos_de_teste.replace(np.nan, 0)
arquivos_de_treino.head()
code
324947/cell_13
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
c = conn.cursor()
ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()]
names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()]
id_league = {i: n for i, n in zip(ids, names)}
id_league
ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()]
names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()]
id_country = {i: n for i, n in zip(ids, names)}
c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall()
cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)])
c.execute('SELECT {0:s} FROM Match'.format(cols))
Y_array = c.fetchall()
Y = np.array([a for row in Y_array for a in row])
print('Player Y value: # of instances in database (home players)')
Counter(Y)
code
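The Y-value counts above feed the positional grouping used later in this notebook's getTeamScores helper; for reference, that mapping (lifted from the later cells and only tidied here) is:

def givePosition(Y):
    # Map a player's Y grid coordinate to a categorical position.
    if Y == 1:
        return 'G'                 # goalkeeper
    elif Y == 3:
        return 'D'                 # defender
    elif Y in (5, 6, 7):
        return 'M'                 # midfielder
    elif Y in (8, 9, 10, 11):
        return 'F'                 # forward
    else:
        return 'NaN'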
324947/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
c = conn.cursor()
df = pd.read_sql(sql='SELECT {} FROM Match'.format('id, country_id, league_id, season, stage, '
                                                   + 'date, home_team_api_id, away_team_api_id, '
                                                   + 'home_team_goal, away_team_goal'),
                 con=conn)
df.head()
code
324947/cell_30
[ "text_plain_output_1.png" ]
from collections import Counter import datetime as dt import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for i, n in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for i, n in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for sn, s in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for k, v in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats all_ids = c.execute('SELECT id FROM Match').fetchall() all_ids = [i[0] for i in sorted(all_ids)] hF, hM, hD, hG = ([], [], [], []) aF, aM, aD, aG = ([], [], [], []) for i in all_ids: h_stats = getTeamScores(i, 'home', EA_stats, printout=False) hF.append(h_stats['F'][0]) hM.append(h_stats['M'][0]) hD.append(h_stats['D'][0]) hG.append(h_stats['G'][0]) a_stats = getTeamScores(i, 'away', EA_stats, printout=False) aF.append(a_stats['F'][0]) aM.append(a_stats['M'][0]) aD.append(a_stats['D'][0]) aG.append(a_stats['G'][0]) df = pd.read_sql(sql='SELECT {} FROM Match'.format('id, country_id, league_id, season, stage, ' + 'date, home_team_api_id, away_team_api_id, ' + 'home_team_goal, away_team_goal'), con=conn) features = ['home_F_stats', 'home_M_stats', 'home_D_stats', 'home_G_stats', 'away_F_stats', 'away_M_stats', 'away_D_stats', 'away_G_stats'] data = [hF, hM, hD, hG, aF, aM, aD, aG] for f, d in zip(features, data): df[f] = d df = df.dropna() H = lambda x: x[0] > x[1] D = lambda x: x[0] == x[1] A = lambda x: x[0] < x[1] state, result = ([], []) for goals in df[['home_team_goal', 'away_team_goal']].values: r = np.array([H(goals), D(goals), A(goals)]) state.append(r) if (r == [1, 0, 0]).sum() == 3: result.append(1) elif (r == [0, 1, 0]).sum() == 3: result.append(2) elif (r == [0, 0, 1]).sum() == 3: result.append(3) df['game_state'] = state df['game_result'] = result df['date'] = pd.to_datetime(df['date']) df['country'] = df['country_id'].map(id_country) df['league'] = df['league_id'].map(id_league) f = lambda x: np.mean(x) 
df['home_mean_stats'] = list(map(f, df[['home_F_stats', 'home_M_stats', 'home_D_stats', 'home_G_stats']].values)) df['away_mean_stats'] = list(map(f, df[['away_F_stats', 'away_M_stats', 'away_D_stats', 'away_G_stats']].values)) df.dtypes sns.pairplot(data=df[features]) plt.suptitle('EA Sports FIFA positional game ratings correlations', fontsize=30, y=1.02) plt.show()
code
324947/cell_33
[ "image_output_1.png" ]
from collections import Counter import datetime as dt import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for i, n in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for i, n in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for sn, s in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for k, v in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats all_ids = c.execute('SELECT id FROM Match').fetchall() all_ids = [i[0] for i in sorted(all_ids)] hF, hM, hD, hG = ([], [], [], []) aF, aM, aD, aG = ([], [], [], []) for i in all_ids: h_stats = getTeamScores(i, 'home', EA_stats, printout=False) hF.append(h_stats['F'][0]) hM.append(h_stats['M'][0]) hD.append(h_stats['D'][0]) hG.append(h_stats['G'][0]) a_stats = getTeamScores(i, 'away', EA_stats, printout=False) aF.append(a_stats['F'][0]) aM.append(a_stats['M'][0]) aD.append(a_stats['D'][0]) aG.append(a_stats['G'][0]) df = pd.read_sql(sql='SELECT {} FROM Match'.format('id, country_id, league_id, season, stage, ' + 'date, home_team_api_id, away_team_api_id, ' + 'home_team_goal, away_team_goal'), con=conn) features = ['home_F_stats', 'home_M_stats', 'home_D_stats', 'home_G_stats', 'away_F_stats', 'away_M_stats', 'away_D_stats', 'away_G_stats'] data = [hF, hM, hD, hG, aF, aM, aD, aG] for f, d in zip(features, data): df[f] = d df = df.dropna() H = lambda x: x[0] > x[1] D = lambda x: x[0] == x[1] A = lambda x: x[0] < x[1] state, result = ([], []) for goals in df[['home_team_goal', 'away_team_goal']].values: r = np.array([H(goals), D(goals), A(goals)]) state.append(r) if (r == [1, 0, 0]).sum() == 3: result.append(1) elif (r == [0, 1, 0]).sum() == 3: result.append(2) elif (r == [0, 0, 1]).sum() == 3: result.append(3) df['game_state'] = state df['game_result'] = result df['date'] = pd.to_datetime(df['date']) df['country'] = df['country_id'].map(id_country) df['league'] = df['league_id'].map(id_league) f = lambda x: np.mean(x) 
df['home_mean_stats'] = list(map(f, df[['home_F_stats', 'home_M_stats', 'home_D_stats', 'home_G_stats']].values)) df['away_mean_stats'] = list(map(f, df[['away_F_stats', 'away_M_stats', 'away_D_stats', 'away_G_stats']].values)) df.dtypes df.date.hist(bins=100) plt.title('Frequency of games in all countries') plot_width = (df.date.max() - df.date.min()).days bin_width = plot_width / 100 print('bin_width = {0:.1f} days'.format(bin_width)) plt.show()
code
324947/cell_26
[ "text_html_output_1.png" ]
from collections import Counter import datetime as dt import numpy as np import pandas as pd import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for i, n in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for i, n in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for sn, s in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for k, v in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats df = pd.read_sql(sql='SELECT {} FROM Match'.format('id, country_id, league_id, season, stage, ' + 'date, home_team_api_id, away_team_api_id, ' + 'home_team_goal, away_team_goal'), con=conn) df = df.dropna() H = lambda x: x[0] > x[1] D = lambda x: x[0] == x[1] A = lambda x: x[0] < x[1] state, result = ([], []) for goals in df[['home_team_goal', 'away_team_goal']].values: r = np.array([H(goals), D(goals), A(goals)]) state.append(r) if (r == [1, 0, 0]).sum() == 3: result.append(1) elif (r == [0, 1, 0]).sum() == 3: result.append(2) elif (r == [0, 0, 1]).sum() == 3: result.append(3) df['game_state'] = state df['game_result'] = result df['date'] = pd.to_datetime(df['date']) df['country'] = df['country_id'].map(id_country) df['league'] = df['league_id'].map(id_league) f = lambda x: np.mean(x) df['home_mean_stats'] = list(map(f, df[['home_F_stats', 'home_M_stats', 'home_D_stats', 'home_G_stats']].values)) df['away_mean_stats'] = list(map(f, df[['away_F_stats', 'away_M_stats', 'away_D_stats', 'away_G_stats']].values)) df.dtypes
code
324947/cell_19
[ "text_plain_output_1.png" ]
from collections import Counter import datetime as dt import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for i, n in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for i, n in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for sn, s in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for k, v in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats all_ids = c.execute('SELECT id FROM Match').fetchall() all_ids = [i[0] for i in sorted(all_ids)] hF, hM, hD, hG = ([], [], [], []) aF, aM, aD, aG = ([], [], [], []) for i in all_ids: h_stats = getTeamScores(i, 'home', EA_stats, printout=False) hF.append(h_stats['F'][0]) hM.append(h_stats['M'][0]) hD.append(h_stats['D'][0]) hG.append(h_stats['G'][0]) a_stats = getTeamScores(i, 'away', EA_stats, printout=False) aF.append(a_stats['F'][0]) aM.append(a_stats['M'][0]) aD.append(a_stats['D'][0]) aG.append(a_stats['G'][0])
code
324947/cell_7
[ "image_output_1.png" ]
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
c = conn.cursor()
ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()]
names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()]
id_league = {i: n for i, n in zip(ids, names)}
id_league
code
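The two separate SELECTs above rely on SQLite returning the League ids and names in the same order; a single query builds the same mapping without that assumption:

# Same id -> name mapping from one query, so row order cannot drift between columns.
id_league = dict(c.execute('SELECT id, name FROM League').fetchall())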
324947/cell_16
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from collections import Counter import datetime as dt import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for i, n in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for i, n in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for sn, s in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for k, v in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats avg = getTeamScores(999, 'home', EA_stats, printout=True) avg
code
324947/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from collections import Counter import datetime as dt import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for i, n in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for i, n in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for sn, s in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for k, v in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats avg = getTeamScores(999, 'home', EA_stats, printout=True) avg avg = getTeamScores(5, 'home', EA_stats, printout=True) avg
code
324947/cell_10
[ "text_plain_output_1.png" ]
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
c = conn.cursor()
ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()]
names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()]
id_league = {i: n for i, n in zip(ids, names)}
id_league
ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()]
names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()]
id_country = {i: n for i, n in zip(ids, names)}
c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall()
code
105198654/cell_6
[ "text_plain_output_1.png" ]
import os

def convert_bytes(size):
    """Convert bytes to KB, or MB or GB."""
    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            return '%3.1f %s' % (size, x)
        size /= 1024.0

file_list = ['train_cite_inputs.h5', 'train_cite_targets.h5', 'train_multi_inputs.h5', 'train_multi_targets.h5', 'test_cite_inputs.h5', 'test_multi_inputs.h5']
for f in file_list:
    f_path = f'../input/open-problems-multimodal/{f}'
    f_size = os.path.getsize(f_path)
    f_size_converted = convert_bytes(f_size)
    print(f'{f} :', f_size_converted)
code
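A quick usage check of convert_bytes (values illustrative):

print(convert_bytes(512))          # '512.0 bytes'
print(convert_bytes(3 * 1024**2))  # '3.0 MB'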
105198654/cell_11
[ "text_plain_output_1.png" ]
import h5py
import os

def convert_bytes(size):
    """Convert bytes to KB, or MB or GB."""
    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            return '%3.1f %s' % (size, x)
        size /= 1024.0

file_list = ['train_cite_inputs.h5', 'train_cite_targets.h5', 'train_multi_inputs.h5', 'train_multi_targets.h5', 'test_cite_inputs.h5', 'test_multi_inputs.h5']
for f in file_list:
    f_path = f'../input/open-problems-multimodal/{f}'
    f_size = os.path.getsize(f_path)
    f_size_converted = convert_bytes(f_size)

train_multi_inputs = 'train_multi_inputs.h5'
f_path = f'../input/open-problems-multimodal/{train_multi_inputs}'
f = h5py.File(f_path, 'r')
group = f['train_multi_inputs']
print(list(group.keys()))
for group_key in group.keys():
    print('Scanning ' + group_key + '...')
    print('Shape: ' + str(group[group_key].shape))
code
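A small optional extension of the scan above that also reports each dataset's dtype, which helps when budgeting memory for these large HDF5 files:

# Optional: dtype alongside shape for each dataset in the group.
for group_key in group.keys():
    dset = group[group_key]
    print(group_key, dset.shape, dset.dtype)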
105198654/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import h5py
import hdf5plugin

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

from sklearn.metrics import mean_absolute_error
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import lightgbm as lgbm
from lightgbm import LGBMClassifier
code
73070388/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')

encoder = LabelEncoder()
s = train.dtypes == 'object'
object_cols = list(s[s].index)
for col in object_cols:
    train[col] = encoder.fit_transform(train[col])
    test[col] = encoder.transform(test[col])

X = train
y = train.target
X_test = test
X.info()
code
73070388/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
import matplotlib.pyplot as plt
train.info()
code
73070388/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.info()
code
73070388/cell_11
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
s = train.dtypes == 'object'
object_cols = list(s[s].index)
for col in object_cols:
    train[col] = encoder.fit_transform(train[col])
    test[col] = encoder.transform(test[col])
train.info()
code
73070388/cell_19
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
s = train.dtypes == 'object'
object_cols = list(s[s].index)
for col in object_cols:
    train[col] = encoder.fit_transform(train[col])
    test[col] = encoder.transform(test[col])
X = train
y = train.target
X_test = test
from xgboost import XGBRegressor
xgb = XGBRegressor(tree_method='gpu_hist', gpu_id=0)
xgb.fit(X, y)
code
73070388/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73070388/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
import matplotlib.pyplot as plt
train.hist(bins=50, figsize=(20, 15))
plt.show()
code
73070388/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.head()
code
73070388/cell_14
[ "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
s = train.dtypes == 'object'
object_cols = list(s[s].index)
for col in object_cols:
    train[col] = encoder.fit_transform(train[col])
    test[col] = encoder.transform(test[col])
X = train
y = train.target
X_test = test
y
code
73070388/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
test.info()
code
73070388/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
Sample_result = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.describe()
code
130013764/cell_13
[ "text_plain_output_100.png", "text_plain_output_334.png", "text_plain_output_673.png", "text_plain_output_445.png", "text_plain_output_640.png", "text_plain_output_201.png", "text_plain_output_586.png", "text_plain_output_261.png", "text_plain_output_565.png", "text_plain_output_522.png", "text_plain_output_84.png", "text_plain_output_624.png", "text_plain_output_521.png", "text_plain_output_322.png", "text_plain_output_205.png", "text_plain_output_511.png", "text_plain_output_608.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_282.png", "text_plain_output_579.png", "text_plain_output_629.png", "text_plain_output_396.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_362.png", "text_plain_output_35.png", "text_plain_output_501.png", "text_plain_output_593.png", "text_plain_output_258.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_598.png", "text_plain_output_490.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_367.png", "text_plain_output_262.png", "text_plain_output_278.png", "text_plain_output_588.png", "text_plain_output_395.png", "text_plain_output_617.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_570.png", "text_plain_output_674.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_671.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_678.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_614.png", "text_plain_output_420.png", "text_plain_output_514.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_576.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_670.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "text_plain_output_184.png", "text_plain_output_477.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_664.png", "text_plain_output_627.png", "text_plain_output_613.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_331.png", "text_plain_output_5.png", "text_plain_output_642.png", "text_plain_output_550.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_676.png", "text_plain_output_492.png", "text_plain_output_321.png", "text_plain_output_272.png", "text_plain_output_115.png", "text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_634.png", 
"text_plain_output_656.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "text_plain_output_651.png", "text_plain_output_437.png", "text_plain_output_198.png", "text_plain_output_387.png", "text_plain_output_555.png", "text_plain_output_548.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", "text_plain_output_114.png", "text_plain_output_659.png", "text_plain_output_515.png", "text_plain_output_157.png", "text_plain_output_494.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_470.png", "text_plain_output_496.png", "text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_484.png", "text_plain_output_44.png", "text_plain_output_633.png", "text_plain_output_325.png", "text_plain_output_203.png", "text_plain_output_505.png", "text_plain_output_603.png", "text_plain_output_655.png", "text_plain_output_119.png", "text_plain_output_546.png", "text_plain_output_540.png", "text_plain_output_373.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "text_plain_output_551.png", "text_plain_output_583.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_343.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_604.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_639.png", "text_plain_output_20.png", "text_plain_output_557.png", "text_plain_output_273.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_669.png", "text_plain_output_414.png", "text_plain_output_461.png", "text_plain_output_510.png", "text_plain_output_222.png", "text_plain_output_589.png", "text_plain_output_101.png", "text_plain_output_530.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_489.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_467.png", "text_plain_output_502.png", "text_plain_output_221.png", "text_plain_output_596.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_654.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_638.png", "text_plain_output_434.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_618.png", "text_plain_output_64.png", "text_plain_output_419.png", "text_plain_output_215.png", "text_plain_output_532.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_637.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_666.png", "text_plain_output_107.png", "text_plain_output_567.png", "text_plain_output_628.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "text_plain_output_318.png", "text_plain_output_417.png", "text_plain_output_52.png", "text_plain_output_545.png", "text_plain_output_393.png", "text_plain_output_572.png", "text_plain_output_594.png", "text_plain_output_66.png", 
"text_plain_output_446.png", "text_plain_output_243.png", "text_plain_output_611.png", "text_plain_output_45.png", "text_plain_output_380.png", "text_plain_output_599.png", "text_plain_output_442.png", "text_plain_output_665.png", "text_plain_output_300.png", "text_plain_output_660.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "text_plain_output_476.png", "text_plain_output_277.png", "text_plain_output_457.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_518.png", "text_plain_output_561.png", "text_plain_output_431.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "text_plain_output_140.png", "text_plain_output_606.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_129.png", "text_plain_output_349.png", "text_plain_output_242.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "text_plain_output_255.png", "text_plain_output_160.png", "text_plain_output_58.png", "text_plain_output_680.png", "text_plain_output_622.png", "text_plain_output_329.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_607.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_681.png", "text_plain_output_333.png", "text_plain_output_108.png", "text_plain_output_581.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "text_plain_output_503.png", "text_plain_output_578.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_658.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_469.png", "text_plain_output_24.png", "text_plain_output_357.png", "text_plain_output_21.png", "text_plain_output_344.png", "text_plain_output_104.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_623.png", "text_plain_output_466.png", "text_plain_output_568.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "text_plain_output_288.png", "text_plain_output_535.png", "text_plain_output_527.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_646.png", "text_plain_output_383.png", "text_plain_output_207.png", "text_plain_output_391.png", "text_plain_output_413.png", "text_plain_output_96.png", "text_plain_output_663.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_418.png", "text_plain_output_657.png", "text_plain_output_427.png", "text_plain_output_180.png", 
"text_plain_output_556.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_191.png", "text_plain_output_609.png", "text_plain_output_259.png", "text_plain_output_447.png", "text_plain_output_290.png", "text_plain_output_506.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "text_plain_output_113.png", "text_plain_output_371.png", "text_plain_output_479.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "text_plain_output_328.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_368.png", "text_plain_output_667.png", "text_plain_output_372.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_454.png", "text_plain_output_487.png", "text_plain_output_595.png", "text_plain_output_643.png", "text_plain_output_338.png", "text_plain_output_575.png", "text_plain_output_197.png", "text_plain_output_512.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "text_plain_output_38.png", "text_plain_output_517.png", "text_plain_output_433.png", "text_plain_output_7.png", "text_plain_output_528.png", "text_plain_output_648.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "text_plain_output_513.png", "text_plain_output_314.png", "text_plain_output_592.png", "text_plain_output_410.png", "text_plain_output_432.png", "text_plain_output_645.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_497.png", "text_plain_output_174.png", "text_plain_output_212.png", "text_plain_output_652.png", "text_plain_output_644.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "text_plain_output_630.png", "text_plain_output_435.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_580.png", "text_plain_output_409.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_539.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_498.png", "text_plain_output_211.png", "text_plain_output_662.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_601.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_620.png", "text_plain_output_406.png", "text_plain_output_310.png", "text_plain_output_456.png", "text_plain_output_541.png", "text_plain_output_558.png", "text_plain_output_668.png", "text_plain_output_220.png", "text_plain_output_653.png", "text_plain_output_543.png", "text_plain_output_451.png", "text_plain_output_109.png", "text_plain_output_459.png", "text_plain_output_238.png", "text_plain_output_520.png", "text_plain_output_616.png", "text_plain_output_615.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_612.png", "text_plain_output_253.png", "text_plain_output_346.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", 
"text_plain_output_345.png", "text_plain_output_649.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_636.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_605.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_508.png", "text_plain_output_573.png", "text_plain_output_468.png", "text_plain_output_370.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_635.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", "text_plain_output_23.png", "text_plain_output_610.png", "text_plain_output_173.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_632.png", "text_plain_output_51.png", "text_plain_output_677.png", "text_plain_output_626.png", "text_plain_output_450.png", "text_plain_output_252.png", "text_plain_output_296.png", "text_plain_output_525.png", "text_plain_output_672.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_537.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_602.png", "text_plain_output_246.png", "text_plain_output_2.png", "text_plain_output_569.png", "text_plain_output_239.png", "text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_500.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "text_plain_output_590.png", "text_plain_output_509.png", "text_plain_output_337.png", "text_plain_output_562.png", "text_plain_output_499.png", "text_plain_output_196.png", "text_plain_output_342.png", "text_plain_output_563.png", "text_plain_output_97.png", "text_plain_output_227.png", "text_plain_output_453.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_650.png", "text_plain_output_150.png", "text_plain_output_631.png", "text_plain_output_39.png", "text_plain_output_176.png", "text_plain_output_584.png", "text_plain_output_335.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_473.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_55.png", "text_plain_output_412.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_356.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_336.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "text_plain_output_105.png", "text_plain_output_465.png", "text_plain_output_80.png", "text_plain_output_491.png", "text_plain_output_679.png", "text_plain_output_641.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_534.png", "text_plain_output_444.png", "text_plain_output_619.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", 
"text_plain_output_402.png", "text_plain_output_424.png", "text_plain_output_486.png", "text_plain_output_597.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_481.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_524.png", "text_plain_output_538.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_553.png", "text_plain_output_408.png", "text_plain_output_425.png", "text_plain_output_591.png", "text_plain_output_428.png", "text_plain_output_416.png", "text_plain_output_625.png", "text_plain_output_194.png", "text_plain_output_577.png", "text_plain_output_519.png", "text_plain_output_62.png", "text_plain_output_480.png", "text_plain_output_303.png", "text_plain_output_621.png", "text_plain_output_377.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "text_plain_output_464.png", "text_plain_output_156.png", "text_plain_output_547.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_587.png", "text_plain_output_448.png", "text_plain_output_364.png", "text_plain_output_365.png", "text_plain_output_61.png", "text_plain_output_585.png", "text_plain_output_352.png", "text_plain_output_83.png", "text_plain_output_374.png", "text_plain_output_647.png", "text_plain_output_472.png", "text_plain_output_566.png", "text_plain_output_397.png", "text_plain_output_600.png", "text_plain_output_661.png", "text_plain_output_389.png", "text_plain_output_292.png", "text_plain_output_351.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_574.png", "text_plain_output_582.png", "text_plain_output_306.png", "text_plain_output_675.png", "text_plain_output_493.png", "text_plain_output_46.png" ]
from keras.callbacks import EarlyStopping
from keras.layers import Dense
from keras.layers.core import Reshape, Flatten, Dropout
from keras.models import Sequential
from sklearn.preprocessing import normalize
import numpy as np
import pickle
file = open('/kaggle/input/rml2016/RML2016.10b.dat', 'rb')
Xd = pickle.load(file, encoding='bytes')
snrs, mods = map(lambda j: sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1, 0])
X = []
lbl = []
for mod in mods:
    for snr in snrs:
        X.append(Xd[mod, snr])
        for i in range(Xd[mod, snr].shape[0]):
            lbl.append((mod, snr))
X = np.vstack(X)
file.close()
features = {}
features['raw'] = (X[:, 0], X[:, 1])
features['derivative'] = (normalize(np.gradient(X[:, 0], axis=1)), normalize(np.gradient(X[:, 1], axis=1)))
features['integral'] = (normalize(np.cumsum(X[:, 0], axis=1)), normalize(np.cumsum(X[:, 1], axis=1)))

def extract_features(*arguments):
    desired = ()
    for arg in arguments:
        desired += features[arg]
    return np.stack(desired, axis=1)
data = extract_features('raw')
labels = np.array(lbl)
in_shape = data[0].shape
out_shape = tuple([1]) + in_shape
np.random.seed(10)
n_examples = labels.shape[0]
r = np.random.choice(range(n_examples), n_examples, replace=False)
train_examples = r[:n_examples // 2]
test_examples = r[n_examples // 2:]
X_train = data[train_examples]
X_test = data[test_examples]
y_train = LB().fit_transform(labels[train_examples][:, 0])
y_test = LB().fit_transform(labels[test_examples][:, 0])
snr_train = labels[train_examples][:, 1].astype(int)
snr_test = labels[test_examples][:, 1].astype(int)
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=in_shape))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, epochs=100, validation_split=0.05, batch_size=2048, callbacks=[EarlyStopping(patience=15, restore_best_weights=True)])
code
130013764/cell_12
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers.core import Reshape, Flatten, Dropout
from keras.models import Sequential
from sklearn.preprocessing import normalize
import numpy as np
import pickle
file = open('/kaggle/input/rml2016/RML2016.10b.dat', 'rb')
Xd = pickle.load(file, encoding='bytes')
snrs, mods = map(lambda j: sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1, 0])
X = []
lbl = []
for mod in mods:
    for snr in snrs:
        X.append(Xd[mod, snr])
        for i in range(Xd[mod, snr].shape[0]):
            lbl.append((mod, snr))
X = np.vstack(X)
file.close()
features = {}
features['raw'] = (X[:, 0], X[:, 1])
features['derivative'] = (normalize(np.gradient(X[:, 0], axis=1)), normalize(np.gradient(X[:, 1], axis=1)))
features['integral'] = (normalize(np.cumsum(X[:, 0], axis=1)), normalize(np.cumsum(X[:, 1], axis=1)))

def extract_features(*arguments):
    desired = ()
    for arg in arguments:
        desired += features[arg]
    return np.stack(desired, axis=1)
data = extract_features('raw')
labels = np.array(lbl)
in_shape = data[0].shape
out_shape = tuple([1]) + in_shape
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=in_shape))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
code
32067131/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
sns.catplot(x='Spectral Class', y='Absolute magnitude(Mv)', data=star_data, hue='Star type Decoded', order=['O', 'B', 'A', 'F', 'G', 'K', 'M'], height=9)
plt.gca().invert_yaxis()
code
32067131/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
star_data['Star color'] = star_data['Star color'].str.lower()
star_data['Star color'] = star_data['Star color'].str.replace(' ', '')
star_data['Star color'] = star_data['Star color'].str.replace('-', '')
star_data['Star color'] = star_data['Star color'].str.replace('yellowwhite', 'whiteyellow')
star_data['Star color'].value_counts()
code
32067131/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
star_data['Spectral Class'].value_counts()
code
32067131/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
x = star_data.select_dtypes(exclude='object').drop('Star type', axis=1)
y = star_data['Star type']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42, stratify=star_data['Star type'])
scaler = StandardScaler()
x_train_sc = scaler.fit_transform(x_train)
x_test_sc = scaler.transform(x_test)
x_train = pd.DataFrame(x_train_sc, index=x_train.index, columns=x_train.columns)
x_test = pd.DataFrame(x_test_sc, index=x_test.index, columns=x_test.columns)
xgb = XGBClassifier(n_estimators=1000, n_jobs=-1, random_state=42)
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
print('Accuracy: %.2f%%' % (accuracy * 100.0))
code
32067131/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
star_data['Star color'].value_counts()
code
32067131/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
sns.pairplot(star_data.drop(['Star color', 'Spectral Class'], axis=1), hue='Star type Decoded', diag_kind=None)
plt.show()
code
32067131/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32067131/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
code
32067131/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
le_specClass = LabelEncoder()
star_data['SpecClassEnc'] = le_specClass.fit_transform(star_data['Spectral Class'])
print('Encoded Spectral Classes: ' + str(le_specClass.classes_))
code
32067131/cell_3
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.head()
code
32067131/cell_17
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
star_data = pd.read_csv('../input/star-dataset/6 class csv.csv')
star_data.isnull().sum()
le_starCol = LabelEncoder()
star_data['StarColEnc'] = le_starCol.fit_transform(star_data['Star color'])
print('Encoded Star colors: ' + str(le_starCol.classes_))
code
74046055/cell_6
[ "text_plain_output_1.png" ]
images = Path('/kaggle/input/blood-cells/dataset2-master/dataset2-master/images/')
data = ImageDataLoaders.from_folder(path=images, train='TRAIN', valid='TEST', seed=42, item_tfms=RandomResizedCrop(224, min_scale=0.4), batch_tfms=aug_transforms(mult=2), bs=32)
print(data.vocab)
code
74046055/cell_2
[ "text_plain_output_1.png" ]
import os
import os
import numpy as np
import pandas as pd
import os
import fastai
from fastai.vision.all import *
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74046055/cell_1
[ "text_plain_output_1.png" ]
!pip install git+https://github.com/fastai/fastai2
!pip install git+https://github.com/fastai/fastcore
code
74046055/cell_7
[ "text_html_output_1.png" ]
images = Path('/kaggle/input/blood-cells/dataset2-master/dataset2-master/images/')
data = ImageDataLoaders.from_folder(path=images, train='TRAIN', valid='TEST', seed=42, item_tfms=RandomResizedCrop(224, min_scale=0.4), batch_tfms=aug_transforms(mult=2), bs=32)
learn = cnn_learner(data, resnet34, metrics=error_rate)
learn.fit_one_cycle(4)
code
74046055/cell_8
[ "image_output_1.png" ]
images = Path('/kaggle/input/blood-cells/dataset2-master/dataset2-master/images/')
data = ImageDataLoaders.from_folder(path=images, train='TRAIN', valid='TEST', seed=42, item_tfms=RandomResizedCrop(224, min_scale=0.4), batch_tfms=aug_transforms(mult=2), bs=32)
learn = cnn_learner(data, resnet34, metrics=error_rate)
learn.fit_one_cycle(4)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
code
74046055/cell_5
[ "image_output_1.png" ]
images = Path('/kaggle/input/blood-cells/dataset2-master/dataset2-master/images/')
data = ImageDataLoaders.from_folder(path=images, train='TRAIN', valid='TEST', seed=42, item_tfms=RandomResizedCrop(224, min_scale=0.4), batch_tfms=aug_transforms(mult=2), bs=32)
data.train.show_batch()
code
74057859/cell_4
[ "text_plain_output_1.png" ]
import random
s = {('Hello', 'Hi', 'Howdy'), ('Salam', 'Namaste', 'Marhabaan')}
e3 = ('NiHao', 'Konnichiwa', 'Yeoboseyo')
s.add(e3)
import random
el = random.sample(s, 1)[0]
s.remove(el)
print(s)
code
74057859/cell_2
[ "text_plain_output_1.png" ]
s = {('Hello', 'Hi', 'Howdy'), ('Salam', 'Namaste', 'Marhabaan')}
e3 = ('NiHao', 'Konnichiwa', 'Yeoboseyo')
s.add(e3)
for item in s:
    print(item)
code
74057859/cell_3
[ "text_plain_output_1.png" ]
s = {('Hello', 'Hi', 'Howdy'), ('Salam', 'Namaste', 'Marhabaan')}
e3 = ('NiHao', 'Konnichiwa', 'Yeoboseyo')
s.add(e3)
my_list = list(s)
final = [my_list[i] for i in (0, -1)]
print(final)
code
74057859/cell_5
[ "text_plain_output_1.png" ]
import random
s = {('Hello', 'Hi', 'Howdy'), ('Salam', 'Namaste', 'Marhabaan')}
e3 = ('NiHao', 'Konnichiwa', 'Yeoboseyo')
s.add(e3)
import random
el = random.sample(s, 1)[0]
s.remove(el)
s.remove(('Salam', 'Namaste', 'Marhabaan'))
print(s)
code
106204184/cell_13
[ "text_plain_output_1.png" ]
i = 0
while i < 6:
    i += 2
i = 1
while True:
    if i % 9 == 0:
        break
    print(i + 4)
    i += 2
code
106204184/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np

def square(a):
    sq = a * a
    return sq
square(6)
code
106204184/cell_4
[ "text_plain_output_1.png" ]
for i in range(0, 100):
    print('Raiyaan')
code
106204184/cell_6
[ "text_plain_output_1.png" ]
for i in range(0, 100, 25):
    print('RAIYAAN')
code
106204184/cell_2
[ "text_plain_output_1.png" ]
num = 50
if num > 50:
    if num % 2 == 0:
        print('No is even and greater than 50')
    else:
        print('No is not even and greater than 50')
elif num % 2 != 0:
    print('No is odd and less than 50')
else:
    print('No is not odd and less than 50')
code
106204184/cell_11
[ "text_plain_output_1.png" ]
fact = 1
for i in range(1, 11):
    fact = fact * i
import pandas as pd
import numpy as np

def factorial(value):
    fact = 1
    for i in range(1, value + 1):
        fact = fact * i
    return fact
n = 5
r = 3
c = factorial(n) / (factorial(r) * factorial(n - r))

def fun(n, l=[]):
    for i in range(n):
        l.append(i * i)
    print(l)
fun(2)
code
106204184/cell_8
[ "text_plain_output_1.png" ]
fact = 1
for i in range(1, 11):
    fact = fact * i
print(fact)
code
106204184/cell_15
[ "text_plain_output_1.png" ]
def func(x, y):
    if x > y:
        return x
    elif x == y:
        return (x, y)
    else:
        return y
print(func(20, 30))
code
106204184/cell_14
[ "text_plain_output_1.png" ]
string_1 = 'internshala'
for i in range(len(string_1)):
    print(string_1)
    string_1 = 'z'
code
106204184/cell_10
[ "text_plain_output_1.png" ]
fact = 1
for i in range(1, 11):
    fact = fact * i
import pandas as pd
import numpy as np

def factorial(value):
    fact = 1
    for i in range(1, value + 1):
        fact = fact * i
    return fact
n = 5
r = 3
c = factorial(n) / (factorial(r) * factorial(n - r))
print('No of combination = ' + str(c))
code
106204184/cell_12
[ "text_plain_output_1.png" ]
i = 0
while i < 6:
    print(i)
    i += 2
else:
    print(0)
code
106204184/cell_5
[ "text_plain_output_1.png" ]
for i in range(75, 100):
    print('Raiyaan')
code
50239477/cell_42
[ "text_html_output_1.png" ]
seed = 27912
TX_train.sample(5, random_state=seed)
code
50239477/cell_56
[ "text_html_output_1.png" ]
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
seed = 27912
filepath = '../input/breast-cancer-wisconsin-data/data.csv'
indexC = 'id'
targetC = 'diagnosis'
dataC = utils.load_data(filepath, indexC, targetC)
dataC.sample(5, random_state=seed)
filepathD = '../input/pima-indians-diabetes-database/diabetes.csv'
targetD = 'Outcome'
dataD = utils.pd.read_csv(filepathD, dtype={'Outcome': 'category'})
dataD.sample(5, random_state=seed)
filepathT_train = '../input/titanic/train.csv'
filepathT_test = '../input/titanic/test.csv'
filepathT_Union = '../input/titanic/gender_submission.csv'
dataT_train = utils.pd.read_csv(filepathT_train)
dataT_test = utils.pd.read_csv(filepathT_test)
dataT_Union = utils.pd.read_csv(filepathT_Union)
dataT = pd.DataFrame(columns=['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], index=range(len(dataT_test) + len(dataT_train) + 1))
w = []
for i in range(len(dataT_train)):
    k = i + 1
    dataT.iloc[k]['PassengerId'] = dataT_train.iloc[i]['PassengerId']
    dataT.iloc[k]['Survived'] = dataT_train.iloc[i]['Survived']
    dataT.iloc[k]['Pclass'] = dataT_train.iloc[i]['Pclass']
    dataT.iloc[k]['Name'] = dataT_train.iloc[i]['Name']
    dataT.iloc[k]['Sex'] = dataT_train.iloc[i]['Sex']
    dataT.iloc[k]['Age'] = dataT_train.iloc[i]['Age']
    dataT.iloc[k]['SibSp'] = dataT_train.iloc[i]['SibSp']
    dataT.iloc[k]['Parch'] = dataT_train.iloc[i]['Parch']
    dataT.iloc[k]['Ticket'] = dataT_train.iloc[i]['Ticket']
    dataT.iloc[k]['Fare'] = dataT_train.iloc[i]['Fare']
    dataT.iloc[k]['Cabin'] = dataT_train.iloc[i]['Cabin']
    dataT.iloc[k]['Embarked'] = dataT_train.iloc[i]['Embarked']
for j in range(len(dataT_test)):
    i = j + len(dataT_train) + 1
    dataT.iloc[i]['PassengerId'] = dataT_test.iloc[j]['PassengerId']
    dataT.iloc[i]['Survived'] = dataT_Union.iloc[j]['Survived']
    dataT.iloc[i]['Pclass'] = dataT_test.iloc[j]['Pclass']
    dataT.iloc[i]['Name'] = dataT_test.iloc[j]['Name']
    dataT.iloc[i]['Sex'] = dataT_test.iloc[j]['Sex']
    dataT.iloc[i]['Age'] = dataT_test.iloc[j]['Age']
    dataT.iloc[i]['SibSp'] = dataT_test.iloc[j]['SibSp']
    dataT.iloc[i]['Parch'] = dataT_test.iloc[j]['Parch']
    dataT.iloc[i]['Ticket'] = dataT_test.iloc[j]['Ticket']
    dataT.iloc[i]['Fare'] = dataT_test.iloc[j]['Fare']
    dataT.iloc[i]['Cabin'] = dataT_test.iloc[j]['Cabin']
    dataT.iloc[i]['Embarked'] = dataT_test.iloc[j]['Embarked']
dataT = dataT.drop([0], axis=0)
dataT = dataT.drop(['PassengerId'], axis=1)
dataT.sample(5, random_state=seed)
CX, Cy = utils.divide_dataset(dataC, target='diagnosis')
DX, Dy = utils.divide_dataset(dataD, target='Outcome')
TX, Ty = utils.divide_dataset(dataT, target='Survived')
CX_train.sample(5, random_state=seed)
Cy_train.sample(5, random_state=seed)
CX_test.sample(5, random_state=seed)
Cy_test.sample(5, random_state=seed)
dataC_train = utils.join_dataset(CX_train, Cy_train)
dataC_test = utils.join_dataset(CX_test, Cy_test)
dataC_test.sample(5, random_state=seed)
code
50239477/cell_34
[ "text_plain_output_1.png" ]
seed = 27912
Cy.sample(5, random_state=seed)
code
50239477/cell_33
[ "text_html_output_1.png" ]
seed = 27912
TX.sample(5, random_state=seed)
code
50239477/cell_44
[ "text_plain_output_1.png" ]
seed = 27912
Cy_train.sample(5, random_state=seed)
code
50239477/cell_20
[ "text_plain_output_1.png" ]
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
filepath = '../input/breast-cancer-wisconsin-data/data.csv'
indexC = 'id'
targetC = 'diagnosis'
dataC = utils.load_data(filepath, indexC, targetC)
filepathD = '../input/pima-indians-diabetes-database/diabetes.csv'
targetD = 'Outcome'
dataD = utils.pd.read_csv(filepathD, dtype={'Outcome': 'category'})
filepathT_train = '../input/titanic/train.csv'
filepathT_test = '../input/titanic/test.csv'
filepathT_Union = '../input/titanic/gender_submission.csv'
dataT_train = utils.pd.read_csv(filepathT_train)
dataT_test = utils.pd.read_csv(filepathT_test)
dataT_Union = utils.pd.read_csv(filepathT_Union)
len(dataT_Union)
len(dataT_test)
code
50239477/cell_40
[ "text_html_output_1.png" ]
seed = 27912
DX_train.sample(5, random_state=seed)
code
50239477/cell_48
[ "text_html_output_1.png" ]
seed = 27912
TX_test.sample(5, random_state=seed)
code
50239477/cell_41
[ "text_html_output_1.png" ]
seed = 27912
CX_train.sample(5, random_state=seed)
code
50239477/cell_2
[ "text_html_output_1.png" ]
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.tree import DecisionTreeClassifier
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
code
50239477/cell_54
[ "text_html_output_1.png" ]
import miner_a_de_datos_an_lisis_exploratorio_utilidad as utils
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
seed = 27912
filepath = '../input/breast-cancer-wisconsin-data/data.csv'
indexC = 'id'
targetC = 'diagnosis'
dataC = utils.load_data(filepath, indexC, targetC)
dataC.sample(5, random_state=seed)
filepathD = '../input/pima-indians-diabetes-database/diabetes.csv'
targetD = 'Outcome'
dataD = utils.pd.read_csv(filepathD, dtype={'Outcome': 'category'})
dataD.sample(5, random_state=seed)
filepathT_train = '../input/titanic/train.csv'
filepathT_test = '../input/titanic/test.csv'
filepathT_Union = '../input/titanic/gender_submission.csv'
dataT_train = utils.pd.read_csv(filepathT_train)
dataT_test = utils.pd.read_csv(filepathT_test)
dataT_Union = utils.pd.read_csv(filepathT_Union)
dataT = pd.DataFrame(columns=['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], index=range(len(dataT_test) + len(dataT_train) + 1))
w = []
for i in range(len(dataT_train)):
    k = i + 1
    dataT.iloc[k]['PassengerId'] = dataT_train.iloc[i]['PassengerId']
    dataT.iloc[k]['Survived'] = dataT_train.iloc[i]['Survived']
    dataT.iloc[k]['Pclass'] = dataT_train.iloc[i]['Pclass']
    dataT.iloc[k]['Name'] = dataT_train.iloc[i]['Name']
    dataT.iloc[k]['Sex'] = dataT_train.iloc[i]['Sex']
    dataT.iloc[k]['Age'] = dataT_train.iloc[i]['Age']
    dataT.iloc[k]['SibSp'] = dataT_train.iloc[i]['SibSp']
    dataT.iloc[k]['Parch'] = dataT_train.iloc[i]['Parch']
    dataT.iloc[k]['Ticket'] = dataT_train.iloc[i]['Ticket']
    dataT.iloc[k]['Fare'] = dataT_train.iloc[i]['Fare']
    dataT.iloc[k]['Cabin'] = dataT_train.iloc[i]['Cabin']
    dataT.iloc[k]['Embarked'] = dataT_train.iloc[i]['Embarked']
for j in range(len(dataT_test)):
    i = j + len(dataT_train) + 1
    dataT.iloc[i]['PassengerId'] = dataT_test.iloc[j]['PassengerId']
    dataT.iloc[i]['Survived'] = dataT_Union.iloc[j]['Survived']
    dataT.iloc[i]['Pclass'] = dataT_test.iloc[j]['Pclass']
    dataT.iloc[i]['Name'] = dataT_test.iloc[j]['Name']
    dataT.iloc[i]['Sex'] = dataT_test.iloc[j]['Sex']
    dataT.iloc[i]['Age'] = dataT_test.iloc[j]['Age']
    dataT.iloc[i]['SibSp'] = dataT_test.iloc[j]['SibSp']
    dataT.iloc[i]['Parch'] = dataT_test.iloc[j]['Parch']
    dataT.iloc[i]['Ticket'] = dataT_test.iloc[j]['Ticket']
    dataT.iloc[i]['Fare'] = dataT_test.iloc[j]['Fare']
    dataT.iloc[i]['Cabin'] = dataT_test.iloc[j]['Cabin']
    dataT.iloc[i]['Embarked'] = dataT_test.iloc[j]['Embarked']
dataT = dataT.drop([0], axis=0)
dataT = dataT.drop(['PassengerId'], axis=1)
dataT.sample(5, random_state=seed)
CX, Cy = utils.divide_dataset(dataC, target='diagnosis')
DX, Dy = utils.divide_dataset(dataD, target='Outcome')
TX, Ty = utils.divide_dataset(dataT, target='Survived')
CX_train.sample(5, random_state=seed)
Cy_train.sample(5, random_state=seed)
dataC_train = utils.join_dataset(CX_train, Cy_train)
dataC_train.sample(5, random_state=seed)
code
50239477/cell_50
[ "text_plain_output_1.png" ]
seed = 27912
Cy_test.sample(5, random_state=seed)
code
50239477/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50239477/cell_45
[ "text_plain_output_1.png" ]
seed = 27912
Ty_train.sample(5, random_state=seed)
code
50239477/cell_49
[ "text_plain_output_1.png" ]
seed = 27912
Dy_test.sample(5, random_state=seed)
code
50239477/cell_32
[ "text_html_output_1.png" ]
seed = 27912
DX.sample(5, random_state=seed)
code
50239477/cell_51
[ "text_plain_output_1.png" ]
seed = 27912
Ty_test.sample(5, random_state=seed)
code