path (string, 13–17 chars) | screenshot_names (sequence, 1–873 items) | code (string, 0–40.4k chars) | cell_type (1 class: code)
---|---|---|---|
128047268/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
GM_df.info() | code |
128047268/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVR
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
Mining_df['Reading No'] = Mining_df['Reading No'].astype(str)
dummies_GM = pd.get_dummies(GM_df['Type'])
GM_df = pd.concat([GM_df, dummies_GM], axis=1)
GM_df.columns
dummies_Mining = pd.get_dummies(Mining_df['Type'])
Mining_df = pd.concat([Mining_df, dummies_Mining], axis=1)
Mining_df.columns
Mining_df[['A', 'B', 'C', 'D']] = Mining_df[['A', 'B', 'C', 'D']].astype(int)
from sklearn.model_selection import train_test_split
y = Mining_df.Level
features = ['Cu', 'Sn', 'Pb', 'P', 'S', 'Cl']
X = Mining_df[features]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from sklearn.linear_model import LinearRegression
M_lrmodel = LinearRegression()
M_lrmodel.fit(X_train, y_train)
y_lrpred = M_lrmodel.predict(X_test)
from sklearn.svm import SVR
M_svrmodel = SVR(kernel='rbf')
M_svrmodel.fit(X_train, y_train)
y_svrpred = M_svrmodel.predict(X_test)
from sklearn.neighbors import KNeighborsRegressor
M_knnmodel = KNeighborsRegressor(n_neighbors=1)
M_knnmodel.fit(X_train, y_train)
y_knnpred = M_knnmodel.predict(X_test)
from sklearn.ensemble import RandomForestRegressor
M_rfmodel = RandomForestRegressor(n_estimators=20, random_state=0)
M_rfmodel.fit(X_train, y_train)
y_rfpred = M_rfmodel.predict(X_test)
from xgboost import XGBRegressor
M_xgbmodel = XGBRegressor()
M_xgbmodel.fit(X_train, y_train)
y_xgbpred = M_xgbmodel.predict(X_test)
from sklearn.metrics import r2_score
svr_R = r2_score(y_test, y_svrpred)
svr_a_R = 1 - (1 - svr_R) * (len(y_test) - 1) / (len(y_test) - X_test.shape[1] - 1)
print('Adjusted R Squared Value for SVR: ', round(svr_a_R, 3))
lr_R = r2_score(y_test, y_lrpred)
lr_a_R = 1 - (1 - lr_R) * (len(y_test) - 1) / (len(y_test) - X_test.shape[1] - 1)
print('Adjusted R Squared Value for Linear Regression: ', round(lr_a_R, 3))
rf_R = r2_score(y_test, y_rfpred)
rf_a_R = 1 - (1 - rf_R) * (len(y_test) - 1) / (len(y_test) - X_test.shape[1] - 1)
print('Adjusted R Squared Value for Random Forest: ', round(rf_a_R, 3))
knn_R = r2_score(y_test, y_knnpred)
knn_a_R = 1 - (1 - knn_R) * (len(y_test) - 1) / (len(y_test) - X_test.shape[1] - 1)
print('Adjusted R Squared Value for KNN: ', round(knn_a_R, 3))
xgb_R = r2_score(y_test, y_xgbpred)
xgb_a_R = 1 - (1 - xgb_R) * (len(y_test) - 1) / (len(y_test) - X_test.shape[1] - 1)
print('Adjusted R Squared Value for XGBoost: ', round(xgb_a_R, 3)) | code |
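
For reference, the adjusted R² printed for each model above follows the standard formula, with $n$ = `len(y_test)` and $p$ = `X_test.shape[1]` (the number of predictors):

$$\bar{R}^2 = 1 - (1 - R^2)\,\frac{n - 1}{n - p - 1}$$

The correction penalizes plain R² for the number of predictors, so the five models are compared on an equal footing.
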
128047268/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
dummies_GM = pd.get_dummies(GM_df['Type'])
GM_df = pd.concat([GM_df, dummies_GM], axis=1)
GM_df.columns | code |
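
`pd.get_dummies` above expands the categorical `Type` column into one indicator column per category, which is why a later cell casts the new `A`–`D` columns to int. A minimal sketch with toy values (the real category labels come from the Excel files):

```python
import pandas as pd

# One indicator column per distinct category; recent pandas returns bool dtype,
# which is why the notebook later casts these columns to int.
df = pd.DataFrame({'Type': ['A', 'B', 'A']})
print(pd.get_dummies(df['Type']))
#        A      B
# 0   True  False
# 1  False   True
# 2   True  False
```
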
128047268/cell_2 | [
"text_plain_output_1.png"
] | import seaborn as sns
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
sns.set()
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import train_test_split
from scipy.stats import boxcox
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
!pip install openpyxl | code |
128047268/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128047268/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
Mining_df['Reading No'] = Mining_df['Reading No'].astype(str)
dummies_GM = pd.get_dummies(GM_df['Type'])
GM_df = pd.concat([GM_df, dummies_GM], axis=1)
GM_df.columns
dummies_Mining = pd.get_dummies(Mining_df['Type'])
Mining_df = pd.concat([Mining_df, dummies_Mining], axis=1)
Mining_df.columns | code |
128047268/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
Mining_df['Reading No'] = Mining_df['Reading No'].astype(str)
dummies_GM = pd.get_dummies(GM_df['Type'])
GM_df = pd.concat([GM_df, dummies_GM], axis=1)
GM_df.columns
dummies_Mining = pd.get_dummies(Mining_df['Type'])
Mining_df = pd.concat([Mining_df, dummies_Mining], axis=1)
Mining_df.columns
Mining_df[['A', 'B', 'C', 'D']] = Mining_df[['A', 'B', 'C', 'D']].astype(int)
Mining_df.info() | code |
128047268/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df.info() | code |
128047268/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
Mining_df['Reading No'] = Mining_df['Reading No'].astype(str)
dummies_GM = pd.get_dummies(GM_df['Type'])
GM_df = pd.concat([GM_df, dummies_GM], axis=1)
GM_df.columns
dummies_Mining = pd.get_dummies(Mining_df['Type'])
Mining_df = pd.concat([Mining_df, dummies_Mining], axis=1)
Mining_df.columns
Mining_df[['A', 'B', 'C', 'D']] = Mining_df[['A', 'B', 'C', 'D']].astype(int)
corr_Mining = Mining_df[['Level', 'A', 'B', 'C', 'D', 'Sn', 'Pb', 'Cu', 'P', 'Cl', 'S']].corr()  # 'Type' is categorical (already one-hot encoded into A-D), so only numeric columns enter the correlation
f, ax = plt.subplots(figsize=(9, 9))
mask = np.zeros_like(corr_Mining, dtype=bool)  # np.bool was removed in NumPy 1.24
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr_Mining, mask=mask, cmap=cmap, square=True, annot=True, linewidths=0.5, fmt='.1f', ax=ax)
plt.figure(figsize=(12, 5))
sns.histplot(Mining_df['Level'], kde=True) | code |
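
The mask built with `np.triu_indices_from` hides the redundant upper triangle (including the diagonal) of the symmetric correlation matrix before it reaches `sns.heatmap`. A minimal check of the pattern on a 3×3 matrix:

```python
import numpy as np

# True marks the cells the heatmap will hide: the diagonal and everything above it.
mask = np.zeros((3, 3), dtype=bool)
mask[np.triu_indices_from(mask)] = True
print(mask)
# [[ True  True  True]
#  [False  True  True]
#  [False False  True]]
```
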
128047268/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
Mining_df['Reading No'] = Mining_df['Reading No'].astype(str)
Mining_df.info() | code |
130002946/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier | code |
130002946/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130002946/cell_7 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
x = df.iloc[:, :-1]
y = df.iloc[:, 13]
x = x.drop(['Cabin', 'Name', 'PassengerId'], axis='columns')
lvl = LabelEncoder()
x['CryoSleep'] = lvl.fit_transform(x['CryoSleep'])
x['VIP'] = lvl.fit_transform(x['VIP'])
dummies = pd.get_dummies(x['HomePlanet'])
x = pd.concat([x, dummies], axis='columns')
dummies = pd.get_dummies(x['Destination'])
x = pd.concat([x, dummies], axis='columns')
x = x.drop(['HomePlanet', 'Destination'], axis='columns')
x.isnull().sum()
null_having = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mode_values = x[null_having].mode().iloc[0]
mode_values
x[null_having] = x[null_having].fillna(mode_values)
x
df | code |
130002946/cell_3 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
x = df.iloc[:, :-1]
y = df.iloc[:, 13]
x = x.drop(['Cabin', 'Name', 'PassengerId'], axis='columns')
lvl = LabelEncoder()
x['CryoSleep'] = lvl.fit_transform(x['CryoSleep'])
x['VIP'] = lvl.fit_transform(x['VIP'])
dummies = pd.get_dummies(x['HomePlanet'])
x = pd.concat([x, dummies], axis='columns')
dummies = pd.get_dummies(x['Destination'])
x = pd.concat([x, dummies], axis='columns')
x = x.drop(['HomePlanet', 'Destination'], axis='columns')
x.isnull().sum()
null_having = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
mode_values = x[null_having].mode().iloc[0]
mode_values
x[null_having] = x[null_having].fillna(mode_values)
x | code |
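
`LabelEncoder`, applied above to the binary `CryoSleep` and `VIP` columns, simply maps each distinct value to an integer code. A toy illustration (values assumed for the example):

```python
from sklearn.preprocessing import LabelEncoder

# Distinct values are sorted and assigned consecutive integer codes.
le = LabelEncoder()
print(le.fit_transform([False, True, True, False]))  # [0 1 1 0]
print(le.classes_)                                   # [False  True]
```
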
130000797/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/heart-disease-dataset/heart.csv')
df.dtypes
df.isnull().sum() | code |
130000797/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import skew, norm
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier | code |
130000797/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | !pip install dataprep
from dataprep.eda import plot, plot_missing, plot_correlation, plot_diff, create_report | code |
130000797/cell_3 | [
"text_plain_output_1.png"
] | import os
print(os.listdir('/kaggle/input/')) | code |
130000797/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/heart-disease-dataset/heart.csv')
df.dtypes | code |
50221063/cell_6 | [
"text_plain_output_1.png"
] | def insertionSort(array):
for step in range(1, len(array)):
key = array[step]
j = step - 1
while j >= 0 and key < array[j]:
array[j + 1] = array[j]
j = j - 1
array[j + 1] = key
data = [10, 5, 30, 15, 50, 6, 25]
insertionSort(data)
def selectionSort(array, size):
for step in range(size):
min_idx = step
for i in range(step + 1, size):
if array[i] < array[min_idx]:
min_idx = i
array[step], array[min_idx] = (array[min_idx], array[step])
data = [10, 5, 30, 15, 50, 6, 25]
size = len(data)
selectionSort(data, size)
print('Sorted Array in Ascending Order:')
print(data) | code |
50221063/cell_3 | [
"text_plain_output_1.png"
] | def insertionSort(array):
for step in range(1, len(array)):
key = array[step]
j = step - 1
while j >= 0 and key < array[j]:
array[j + 1] = array[j]
j = j - 1
array[j + 1] = key
data = [10, 5, 30, 15, 50, 6, 25]
insertionSort(data)
print('Sorted Array in Ascending Order:')
print(data) | code |
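
`insertionSort` above grows a sorted prefix one element at a time, shifting larger neighbours right until the key fits. To make the mechanics visible, here is a small variant of the same algorithm that prints the array after each outer step (illustrative only):

```python
def insertion_sort_trace(array):
    # Same logic as insertionSort above, with a print after each step.
    for step in range(1, len(array)):
        key = array[step]
        j = step - 1
        while j >= 0 and key < array[j]:
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = key
        print(f'after step {step} (key={key}): {array}')

insertion_sort_trace([10, 5, 30, 15])
# after step 1 (key=5): [5, 10, 30, 15]
# after step 2 (key=30): [5, 10, 30, 15]
# after step 3 (key=15): [5, 10, 15, 30]
```
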
18156269/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes.csv')
data.head() | code |
18156269/cell_2 | [
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
import seaborn as sns
print(os.listdir('../input')) | code |
18156269/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes.csv')
data.hist(figsize=(16, 14)) | code |
18156269/cell_18 | [
"image_output_1.png"
] | from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
# plot correlation matrix
fig = plt.figure(figsize=(16,14))
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,9,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(data.columns)
ax.set_yticklabels(data.columns)
plt.show()
scatter_matrix(data, figsize=(16, 14))
plt.show() | code |
18156269/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes.csv')
correlations = data.corr()
fig = plt.figure(figsize=(16, 14))
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0, 9, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(data.columns)
ax.set_yticklabels(data.columns)
plt.show() | code |
18156269/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes.csv')
data.plot(kind='density', subplots=True, layout=(3, 3), sharex=False, figsize=(16, 14))
plt.show() | code |
18156269/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes.csv')
data.plot(kind='box', subplots=True, layout=(3, 3), sharex=False, sharey=False, figsize=(16, 14))
plt.show() | code |
88101304/cell_42 | [
"text_plain_output_1.png"
] | # The GRPC_* result strings printed here are defined in earlier notebook cells not included in this row.
print(f'Round 1:\n{GRPC_R1_M1}\n{GRPC_R1_M2}\n')
print(f'Round 2:\n{GRPC_R2_M1}\n{GRPC_R2_M2}\n')
print(f'Round 3:\n{GRPC_R3_M1}\n{GRPC_R3_M2}') | code |
88101304/cell_21 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from datetime import datetime, timedelta
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import numpy as np
import pandas as pd
import tensorflow as tf
REF_DATE_STR = '2021-10-08 06:00:00+00:00'
RANDOM_SEED = 42069
tf.random.set_seed(RANDOM_SEED)
def treatment_by_players(ref_date_str: str):
"""Pretreatment pipeline to build a dataframe set for models by players
    :param ref_date_str: A reference date as a string, used to weight matches by how old they are
:return: Dataset formatted for modeling and players name and ID database.
"""
ref_date = datetime.strptime(ref_date_str, '%Y-%m-%d %H:%M:%S%z')
players_df = pd.read_csv('/kaggle/input/rlcs-202122/by_players.csv', low_memory=False, encoding='utf8')
general_df = pd.read_csv('/kaggle/input/rlcs-202122/general.csv', low_memory=False, encoding='utf8')
players_df = players_df.rename(columns={'name': 'team'}).rename(columns=lambda x: x[2:] if x.startswith('p_') else x)
general_df = general_df.loc[:, ['ballchasing_id', 'correction', 'region', 'split', 'event', 'phase', 'stage', 'round', 'date', 'duration', 'overtime', 'overtime_seconds']]
players_df = players_df.drop(['start_time', 'end_time', 'mvp', 'car_id'], axis=1)
dataframe = general_df.merge(players_df)
dataframe = dataframe.loc[~dataframe.correction].drop('correction', axis=1)
results = dataframe.loc[:, ['ballchasing_id', 'color', 'core_mvp']].drop_duplicates().groupby(['ballchasing_id', 'color'], as_index=False).mean()
results = results.loc[results.core_mvp > 0].drop('core_mvp', axis=1).rename(columns={'color': 'win'})
dataframe = dataframe.merge(results)
dataframe.win = np.where(dataframe.color == dataframe.win, 1, 0)
dataframe.platform_id = dataframe['platform'] + '_' + dataframe['platform_id'].astype(str)
dataframe = dataframe.drop(['platform'], axis=1)
dataframe.date = pd.to_datetime(dataframe.date, utc=True)
dataframe.date = (dataframe.date - ref_date) / np.timedelta64(1, 'D')
dataframe = dataframe.rename(columns={'date': 'since_ref_date'})
dataframe.overtime_seconds = dataframe.overtime_seconds.fillna(0)
players_db = dataframe.loc[:, ['team', 'name', 'platform_id', 'since_ref_date']].sort_values('since_ref_date', ascending=False).drop_duplicates(subset=['platform_id'], keep='first').reset_index(drop=True).sort_values(['team', 'name']).drop('since_ref_date', axis=1)
df_reduced = dataframe.loc[:, ['ballchasing_id', 'color', 'team', 'platform_id', 'core_score']]
bl_side = df_reduced.loc[df_reduced.color == 'blue'].rename(columns={'platform_id': 'id_list'}).sort_values(['ballchasing_id', 'core_score'], ascending=False).groupby(['ballchasing_id', 'color', 'team'])['id_list'].apply(list).reset_index()
or_side = df_reduced.loc[df_reduced.color == 'orange'].rename(columns={'platform_id': 'id_list'}).sort_values(['ballchasing_id', 'core_score'], ascending=False).groupby(['ballchasing_id', 'color', 'team'])['id_list'].apply(list).reset_index()
bl_teammates_list_v1 = df_reduced.loc[df_reduced.color == 'blue', ['ballchasing_id', 'platform_id']].merge(bl_side.drop('team', axis=1))
bl_teammates_ex = bl_teammates_list_v1.explode('id_list').reset_index(drop=True)
bl_teammates_list_v2 = bl_teammates_ex[bl_teammates_ex.id_list != bl_teammates_ex.platform_id].groupby(['ballchasing_id', 'platform_id'])['id_list'].apply(list).reset_index()
bl_teammates = pd.concat([bl_teammates_list_v2.loc[:, ['ballchasing_id', 'platform_id']], bl_teammates_list_v2.id_list.apply(pd.Series)], axis=1).rename(columns={0: 'teammate_1', 1: 'teammate_2'})
or_teammates_list_v1 = df_reduced.loc[df_reduced.color == 'orange', ['ballchasing_id', 'platform_id']].merge(or_side.drop('team', axis=1))
or_teammates_ex = or_teammates_list_v1.explode('id_list').reset_index(drop=True)
or_teammates_list_v2 = or_teammates_ex[or_teammates_ex.id_list != or_teammates_ex.platform_id].groupby(['ballchasing_id', 'platform_id'])['id_list'].apply(list).reset_index()
or_teammates = pd.concat([or_teammates_list_v2.loc[:, ['ballchasing_id', 'platform_id']], or_teammates_list_v2.id_list.apply(pd.Series)], axis=1).rename(columns={0: 'teammate_1', 1: 'teammate_2'})
teammates = pd.concat([or_teammates, bl_teammates])
bl_as_opponent_series = bl_side.id_list.apply(pd.Series)
bl_as_opponent = bl_side.merge(bl_as_opponent_series, left_index=True, right_index=True).drop('id_list', axis=1).rename(columns={0: 'opponent_1', 1: 'opponent_2', 2: 'opponent_3', 'team': 'opponent_team'}).replace({'color': {'blue': 'orange'}})
or_as_opponent_series = or_side.id_list.apply(pd.Series)
or_as_opponent = or_side.merge(or_as_opponent_series, left_index=True, right_index=True).drop('id_list', axis=1).rename(columns={0: 'opponent_1', 1: 'opponent_2', 2: 'opponent_3', 'team': 'opponent_team'}).replace({'color': {'orange': 'blue'}})
opps = pd.concat([or_as_opponent, bl_as_opponent])
dataframe = dataframe.merge(teammates, how='outer').merge(opps)
dataframe = dataframe.drop(['ballchasing_id', 'name', 'color'], axis=1)
dataframe.overtime = np.where(dataframe.overtime, 1, 0)
dataframe.core_mvp = np.where(dataframe.core_mvp, 1, 0)
return (dataframe, players_db)
def model_pretreatment(original_data: pd.DataFrame, split_data: pd.DataFrame):
"""Prepare raw data for further treatments
:param original_data: original dataframe for fit step application and to get columns by type
:param split_data: split / sample data resulting from sklearn 'train_test_split' or K-folds operation
:return split_data_final: formatted data as numpy array.
"""
num_cols = original_data.select_dtypes(include=np.number).columns.to_list()
cat_cols = original_data.select_dtypes(exclude=np.number).columns.to_list()
split_data_cat = OneHotEncoder(handle_unknown='ignore').fit(original_data[cat_cols]).transform(split_data[cat_cols]).toarray()
split_data_scaled = StandardScaler().fit(original_data[num_cols]).transform(split_data[num_cols])
split_data_final = np.concatenate((split_data_cat, split_data_scaled), axis=1)
return split_data_final
def compile_model(train: np.array, train_target: np.array, validation: np.array, val_target: np.array, batch_size: float, alpha: float=0.01, es_rate: float=0.2, epochs: int=100, workers: int=1, verbose: bool=True):
"""Compile and fit a keras model
:param train: train array
:param train_target: target array
:param validation: validation array
:param val_target: validation target array
:param batch_size: number of samples to work through before updating the internal model parameters
:param alpha: initial learning rate
:param es_rate: early stopping rate, epochs percentage to set early stopping
:param epochs: number times that the learning algorithm will work through the entire training dataset
:param workers: maximum number of processes to spin up when using process-based threading
:param verbose: to display progress or not
:return: model fitted (Keras model + Keras History).
"""
if es_rate > 1:
es_rate = 1
elif es_rate < 0:
es_rate = 0.2
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=es_rate * epochs, mode='min', restore_best_weights=True)
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='/kaggle/working/tmp_mdl.hdf5', monitor='val_accuracy', save_weights_only=True, mode='max', save_best_only=True)
model = tf.keras.Sequential([tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(loss=tf.keras.losses.binary_crossentropy, optimizer=tf.keras.optimizers.Adam(learning_rate=alpha), metrics=tf.keras.metrics.BinaryAccuracy(name='accuracy'))
history = model.fit(train, train_target, callbacks=[early_stopping, checkpoint], batch_size=batch_size, epochs=epochs, validation_data=(validation, val_target), verbose=verbose, workers=workers)
model.load_weights('/kaggle/working/tmp_mdl.hdf5')
return (model, history)
def compile_best_model(x: np.array, y: np.array, epochs: int, es_rate: float, batch_size: int, alpha: float, workers: int=1, verbose: bool=True):
    """Compile the best model (the best combination of batch size & alpha) with the Keras model implemented in 'compile_model'
:param x: training instances to class
:param y: target array relative to x
:param epochs: number times that the learning algorithm will work through the entire training dataset
:param es_rate: early stopping rate, epochs percentage to set early stopping
:param batch_size: batch size, number of samples to work through before updating the internal model parameters
:param alpha: initial learning rate
:param workers: maximum number of processes to spin up when using process-based threading
:param verbose: to display progress or not
:return model, model_history: Keras model and model's history
"""
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=1 / 3, random_state=RANDOM_SEED, stratify=y)
new_x_train = model_pretreatment(original_data=x, split_data=x_train)
new_x_val = model_pretreatment(original_data=x, split_data=x_val)
model, model_history = compile_model(train=new_x_train, train_target=y_train, validation=new_x_val, val_target=y_val, batch_size=int(batch_size), alpha=alpha, epochs=epochs, es_rate=es_rate, verbose=verbose, workers=workers)
return (model, model_history)
DF_GAMES, PLAYERS_DB = treatment_by_players(ref_date_str=REF_DATE_STR)
DATA = DF_GAMES.drop('win', axis=1)
TARGET = DF_GAMES.win
BEST_SETTINGS = {'batch_size': 64, 'init_alpha': 1e-06}
MY_MODEL, _ = compile_best_model(x=DATA, y=TARGET, epochs=1000, es_rate=0.1, batch_size=BEST_SETTINGS['batch_size'], alpha=BEST_SETTINGS['init_alpha']) | code |
88101304/cell_32 | [
"text_plain_output_1.png"
] | # The GRPA_* result strings printed here are defined in earlier notebook cells not included in this row.
print(f'Round 1:\n{GRPA_R1_M1}\n{GRPA_R1_M2}\n')
print(f'Round 2:\n{GRPA_R2_M1}\n{GRPA_R2_M2}\n')
print(f'Round 3:\n{GRPA_R3_M1}\n{GRPA_R3_M2}') | code |
88101304/cell_47 | [
"text_plain_output_1.png"
] | # The GRPD_* result strings printed here are defined in earlier notebook cells not included in this row.
print(f'Round 1:\n{GRPD_R1_M1}\n{GRPD_R1_M2}\n')
print(f'Round 2:\n{GRPD_R2_M1}\n{GRPD_R2_M2}\n')
print(f'Round 3:\n{GRPD_R3_M1}\n{GRPD_R3_M2}') | code |
88101304/cell_37 | [
"text_plain_output_1.png"
] | # The GRPB_* result strings printed here are defined in earlier notebook cells not included in this row.
print(f'Round 1:\n{GRPB_R1_M1}\n{GRPB_R1_M2}\n')
print(f'Round 2:\n{GRPB_R2_M1}\n{GRPB_R2_M2}\n')
print(f'Round 3:\n{GRPB_R3_M1}\n{GRPB_R3_M2}') | code |
73073728/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import StandardScaler

# LinearRegression here is not sklearn's: the iterations/learning_rate arguments
# imply a custom gradient-descent implementation defined in an earlier notebook
# cell that is not included in this row.
model = LinearRegression(iterations=10000, learning_rate=1e-09)
model.fit(X_train, y_train)
mean_squared_log_error(y_test, model.predict(X_test))
scaler = StandardScaler()
scaler.fit(X_train)
X_train, X_test = (scaler.transform(X_train), scaler.transform(X_test))
model = LinearRegression(iterations=10000, learning_rate=0.1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mean_squared_log_error(y_test, y_pred) | code |
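
The scaler in the cell above is fit on `X_train` only and then applied to both splits, standardizing each feature with the training mean and standard deviation:

$$z = \frac{x - \mu_{\text{train}}}{\sigma_{\text{train}}}$$

Fitting on the full dataset before splitting would leak test-set statistics into training, which is why `fit_transform` is used on the training split and plain `transform` on the test split.
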
73073728/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_log_error

# LinearRegression with iterations/learning_rate is assumed to be a custom
# gradient-descent class defined in an earlier cell of the source notebook.
model = LinearRegression(iterations=10000, learning_rate=1e-09)
model.fit(X_train, y_train)
mean_squared_log_error(y_test, model.predict(X_test)) | code |
106195240/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
n_months = 12
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(scaled_train, scaled_train, length=n_months, batch_size=1)
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(n_months, 1)), tf.keras.layers.LSTM(100, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(50, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(25, activation='tanh'), tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
model.summary()
early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, patience=2)
history = model.fit(generator, epochs=10, callbacks=[early_stop])
temp = scaled_train[-n_months:]
temp = temp.reshape((1, n_months, 1))
model.predict(temp) | code |
106195240/cell_13 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
scaler = MinMaxScaler()
scaler.fit(train) | code |
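
`MinMaxScaler`, fit here on the training window only, rescales each value using the training minimum and maximum:

$$x' = \frac{x - x_{\min}}{x_{\max} - x_{\min}}$$

This maps the training data into [0, 1]; test values outside the training range can land slightly outside that interval.
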
106195240/cell_4 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
data.head() | code |
106195240/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':].copy()  # copy so the predictions column can be added later without a SettingWithCopyWarning
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
n_months = 12
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(scaled_train, scaled_train, length=n_months, batch_size=1)
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(n_months, 1)), tf.keras.layers.LSTM(100, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(50, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(25, activation='tanh'), tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
model.summary()
early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, patience=2)
history = model.fit(generator, epochs=10, callbacks=[early_stop])
temp = scaled_train[-n_months:]
temp = temp.reshape((1, n_months, 1))
model.predict(temp)
test_predictions = []
last_batch = scaled_train[-n_months:]
current_batch = last_batch.reshape((1, n_months, 1))
for i in range(test.shape[0]):
pred = model.predict(current_batch)[0]
test_predictions.append(pred)
current_batch = np.append(current_batch[:, 1:, :], [[pred]], axis=1)
test['LSTM_predictions'] = scaler.inverse_transform(test_predictions) | code |
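
The loop above forecasts recursively: each prediction is fed back into the input window while the oldest timestep is dropped. A minimal sketch of that rolling-window update with toy shapes (batch of 1, window of 3, 1 feature):

```python
import numpy as np

window = np.array([[[0.1], [0.2], [0.3]]])  # shape (1, 3, 1)
pred = np.array([0.4])                      # stand-in for model.predict(window)[0]
window = np.append(window[:, 1:, :], [[pred]], axis=1)
print(window.ravel())  # [0.2 0.3 0.4] -- oldest step dropped, prediction appended
```
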
106195240/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
data.plot(figsize=(12, 6))
plt.show() | code |
106195240/cell_19 | [
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
n_months = 12
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(scaled_train, scaled_train, length=n_months, batch_size=1)
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(n_months, 1)), tf.keras.layers.LSTM(100, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(50, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(25, activation='tanh'), tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
model.summary()
early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, patience=2)
history = model.fit(generator, epochs=10, callbacks=[early_stop])
plt.plot(history.history['loss'])
plt.show() | code |
106195240/cell_18 | [
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
n_months = 12
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(scaled_train, scaled_train, length=n_months, batch_size=1)
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(n_months, 1)), tf.keras.layers.LSTM(100, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(50, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(25, activation='tanh'), tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
model.summary()
early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, patience=2)
history = model.fit(generator, epochs=10, callbacks=[early_stop]) | code |
106195240/cell_8 | [
"image_output_1.png"
] | from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
decomposed = seasonal_decompose(data['Production'])
fig = decomposed.plot()
fig.set_size_inches((25, 9))
fig.show() | code |
106195240/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
print(data.shape) | code |
106195240/cell_17 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
n_months = 12
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(scaled_train, scaled_train, length=n_months, batch_size=1)
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(n_months, 1)), tf.keras.layers.LSTM(100, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(50, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(25, activation='tanh'), tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
model.summary() | code |
106195240/cell_24 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
n_months = 12
generator = tf.keras.preprocessing.sequence.TimeseriesGenerator(scaled_train, scaled_train, length=n_months, batch_size=1)
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(n_months, 1)), tf.keras.layers.LSTM(100, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(50, activation='tanh', return_sequences=True), tf.keras.layers.LSTM(25, activation='tanh'), tf.keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
model.summary()
early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, patience=2)
history = model.fit(generator, epochs=10, callbacks=[early_stop])
temp = scaled_train[-n_months:]
temp = temp.reshape((1, n_months, 1))
model.predict(temp)
test_predictions = []
last_batch = scaled_train[-n_months:]
current_batch = last_batch.reshape((1, n_months, 1))
for i in range(test.shape[0]):
pred = model.predict(current_batch)[0]
test_predictions.append(pred)
current_batch = np.append(current_batch[:, 1:, :], [[pred]], axis=1)
test.plot(figsize=(12, 6))
plt.title('Forecasted vs Actual Time Series')
plt.show() | code |
106195240/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
train = data[:'1992-12-31']
test = data['1993-01-01':]
print('Shape of training set: ', train.shape)
print('Shape of testing set: ', test.shape) | code |
106195240/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/time-series-datasets/monthly-beer-production-in-austr.csv', index_col='Month', parse_dates=True)
data.columns = ['Production']
data.tail() | code |
89135912/cell_9 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
from nltk.corpus import stopwords
import re
stop_words = stopwords.words('english')
def clean(review):
    clean_html = BeautifulSoup(review, 'html.parser').get_text()  # explicit parser avoids bs4's parser-guessing warning
clean_non_letters = re.sub('[^a-zA-Z]', ' ', clean_html)
    cleaned_lowercase = clean_non_letters.lower()
    words = cleaned_lowercase.split()
clean_words = [w for w in words if w not in stop_words]
return ' '.join(clean_words)
clean('hello my name in 67&%gopal goyal ') | code |
89135912/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
totaldata = pd.concat([traindata, testdata], axis=0)
totaldata.shape
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
vec = cv.fit_transform(totaldata['cleanreview'])
X = vec[:len(traindata)]
Y = traindata.sentiment
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
model = RandomForestClassifier()
model.fit(x_train, y_train)
pred = model.predict(x_test)
accuracy_score(pred, y_test)
data = vec[len(traindata):]
data.shape
pred = model.predict(data)
submissoin['sentiment'] = pred
submissoin.head() | code |
89135912/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
traindata['review'][0] | code |
89135912/cell_23 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
totaldata = pd.concat([traindata, testdata], axis=0)
totaldata.shape
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
vec = cv.fit_transform(totaldata['cleanreview'])
X = vec[:len(traindata)]
Y = traindata.sentiment
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
print(len(traindata), vec.shape) | code |
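
`CountVectorizer` above turns each cleaned review into a sparse bag-of-words count vector, and `vec` is then sliced back into train and test rows by position. A toy example of the encoding (requires scikit-learn ≥ 1.0 for `get_feature_names_out`):

```python
from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer()
bow = cv.fit_transform(['good movie', 'bad movie'])
print(cv.get_feature_names_out())  # ['bad' 'good' 'movie']
print(bow.toarray())
# [[0 1 1]
#  [1 0 1]]
```
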
89135912/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
totaldata = pd.concat([traindata, testdata], axis=0)
totaldata.shape
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
vec = cv.fit_transform(totaldata['cleanreview'])
X = vec[:len(traindata)]
Y = traindata.sentiment
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
model = RandomForestClassifier()
model.fit(x_train, y_train)
pred = model.predict(x_test)
accuracy_score(pred, y_test)
data = vec[len(traindata):]
data.shape
pred = model.predict(data)
submissoin['sentiment'] = pred
submission = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
output = pd.DataFrame(data={'id': testdata.id, 'sentiment': pred})
output | code |
89135912/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
totaldata = pd.concat([traindata, testdata], axis=0)
totaldata.shape
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
vec = cv.fit_transform(totaldata['cleanreview'])
X = vec[:len(traindata)]
Y = traindata.sentiment
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
model = RandomForestClassifier()
model.fit(x_train, y_train)
pred = model.predict(x_test)
accuracy_score(pred, y_test)
data = vec[len(traindata):]
data.shape
pred = model.predict(data)
submissoin['sentiment'] = pred
submissoin.drop('sentiment', axis=1, inplace=True)
submissoin.columns | code |
89135912/cell_11 | [
"text_plain_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
testdata.head() | code |
89135912/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89135912/cell_17 | [
"text_plain_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
totaldata = pd.concat([traindata, testdata], axis=0)
totaldata.shape | code |
89135912/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
totaldata = pd.concat([traindata, testdata], axis=0)
totaldata.shape
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
vec = cv.fit_transform(totaldata['cleanreview'])
X = vec[:len(traindata)]
Y = traindata.sentiment
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
data = vec[len(traindata):]
data.shape | code |
89135912/cell_14 | [
"text_html_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
traindata.head() | code |
89135912/cell_22 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
model = RandomForestClassifier()
model.fit(x_train, y_train)
pred = model.predict(x_test)
accuracy_score(pred, y_test) | code |
89135912/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
traindata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/labeledTrainData.tsv.zip', delimiter='\t')
testdata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/testData.tsv.zip', header=0, delimiter='\t')
unlabledata = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/unlabeledTrainData.tsv.zip', header=0, delimiter='\t', quoting=3)
submissoin = pd.read_csv('/kaggle/input/word2vec-nlp-tutorial/sampleSubmission.csv', header=0, delimiter='\t')
traindata.head() | code |
16126718/cell_7 | [
"text_plain_output_1.png"
] | from PIL import Image
import numpy as np
import os
import pandas as pd
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
    pixel_data = pixel_data.mean(axis=1)  # average the RGB channels to one grayscale value per pixel
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    train = pd.concat([train, image], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    test = pd.concat([test, image], ignore_index=True)
test_id.append(file)
print('TRAIN---------------------')
print('Shape: {}'.format(train.shape))
print('Label 0 (False): {}'.format(np.sum(labels_train.has_cactus == 0)))
print('Label 1 (True): {}'.format(np.sum(labels_train.has_cactus == 1)))
print('TEST----------------------')
print('Shape: {}'.format(test.shape)) | code |
16126718/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    train = pd.concat([train, image], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    test = pd.concat([test, image], ignore_index=True)
test_id.append(file)
random.seed(0)
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
model = RandomForestClassifier(criterion='entropy', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
preds = model.predict(test)
print('Label 0 (False): {}'.format(np.sum(preds == 0)))
print('Label 1 (True): {}'.format(np.sum(preds == 1)))
results = pd.DataFrame({'id': test_id, 'has_cactus': preds})
results.to_csv('submission.csv', index=False) | code |
16126718/cell_10 | [
"text_plain_output_1.png"
] | from PIL import Image
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    train = pd.concat([train, image], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    test = pd.concat([test, image], ignore_index=True)
test_id.append(file)
random.seed(0)
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test) | code |
16126718/cell_12 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    train = pd.concat([train, image], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
    test = pd.concat([test, image], ignore_index=True)
test_id.append(file)
random.seed(0)
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
model = RandomForestClassifier(criterion='entropy', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test) | code |
18146048/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.info() | code |
18146048/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.head() | code |
18146048/cell_25 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
label = student_data['G3'].values
predictors = student_data.drop(axis=1, labels=['G3']).values
pca = PCA(n_components=len(student_data.columns) - 1)
pca.fit(predictors)
variance = pca.explained_variance_
variance
variance_ratio_cum_sum = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4) * 100)
pca = PCA(n_components=9)
pca.fit(predictors)
Transformed_vector = pca.fit_transform(predictors)
print(Transformed_vector) | code |
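
The cumulative curve computed from `pca.explained_variance_ratio_` above is a running sum of per-component variance shares; picking `n_components=9` keeps the first nine terms of

$$\text{cumvar}_k = 100 \sum_{i=1}^{k} \frac{\lambda_i}{\sum_j \lambda_j},$$

where the $\lambda_i$ are the component variances reported in `pca.explained_variance_`.
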
18146048/cell_23 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
label = student_data['G3'].values
predictors = student_data.drop(axis=1, labels=['G3']).values
pca = PCA(n_components=len(student_data.columns) - 1)
pca.fit(predictors)
variance = pca.explained_variance_
variance
print(pca.explained_variance_ratio_) | code |
18146048/cell_6 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape | code |
18146048/cell_26 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
label = student_data['G3'].values
predictors = student_data.drop(axis=1, labels=['G3']).values
pca = PCA(n_components=len(student_data.columns) - 1)
pca.fit(predictors)
variance = pca.explained_variance_
variance
student_data_without_output = student_data.drop(axis=1, labels=['G3'], inplace=False)
features = student_data_without_output.columns
features | code |
18146048/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
student_data.info() | code |
18146048/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
label = student_data['G3'].values
predictors = student_data.drop(axis=1, labels=['G3']).values
predictors | code |
18146048/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_por.head() | code |
18146048/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_por.shape | code |
18146048/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape | code |
18146048/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data[['G1', 'G2', 'G3']].corr() | code |
18146048/cell_3 | [
"text_plain_output_1.png"
] | import os
import os
print(os.listdir('../input')) | code |
18146048/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
student_data.head() | code |
18146048/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
label = student_data['G3'].values
predictors = student_data.drop(axis=1, labels=['G3']).values
pca = PCA(n_components=len(student_data.columns) - 1)
pca.fit(predictors)
variance = pca.explained_variance_
variance
variance_ratio_cum_sum = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4) * 100)
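# cumulative explained variance (%) vs. number of components, used to pick a cut-off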
print(variance_ratio_cum_sum)
plt.plot(variance_ratio_cum_sum) | code |
18146048/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.head() | code |
18146048/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string
student_data = pd.get_dummies(student_data, columns=columns_string, drop_first=True)
student_data.shape
student_data.drop(axis=1, labels=['G1'], inplace=True)
label = student_data['G3'].values
predictors = student_data.drop(axis=1, labels=['G3']).values
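# fit PCA with one component per predictor to inspect the full variance spectrum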
pca = PCA(n_components=len(student_data.columns) - 1)
pca.fit(predictors)
variance = pca.explained_variance_
variance | code |
18146048/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape | code |
18146048/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.shape
student_data_por.shape
student_data = pd.merge(student_data_mat, student_data_por, how='outer')
student_data.shape
columns_string = student_data.columns[student_data.dtypes == object]
columns_string | code |
18146048/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
student_data_mat = pd.read_csv('../input/student_math.csv', delimiter=';')
student_data_por = pd.read_csv('../input/student_language.csv', delimiter=';')
student_data_mat.head() | code |
73095391/cell_20 | [
"text_html_output_1.png"
] | import cv2
import glob
import os
import numpy as np
import pandas as pd
RESIZED_WIDTH, RESIZED_HEIGHT = (224, 224)
EACH_WIDTH, EACH_HEIGHT = (RESIZED_WIDTH // 2, RESIZED_HEIGHT // 2)
OUTPUT_FORMAT = 'jpg'
OUTPUT_DIR = 'data_argument_224_224'
train_dir = 'train_images'
data_dir = '../input/plant-pathology-2021-fgvc8'  # defined in another cell of this notebook (cell_7); repeated here so the cell runs standalone
os.makedirs(f'{OUTPUT_DIR}/{train_dir}', exist_ok=True)  # cv2.imwrite fails if the output folder is missing
train_paths = glob.glob(f'{data_dir}/{train_dir}/*.jpg')
TRAIN_DF = pd.read_csv('../input/pp-csv/clearned_train.csv')
TRAIN_DF
EXTRA_TRAIN = pd.DataFrame(columns=['image', 'labels'])
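# stitch four resized tiles into one 2x2 mosaic at the target resolution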
def concat_img(imgs):
if len(imgs) < 4:
return
output_shape = (EACH_WIDTH, EACH_HEIGHT)
imgs = [cv2.resize(img, output_shape) for img in imgs]
upper_img = cv2.vconcat([imgs[0], imgs[1]])
lower_img = cv2.vconcat([imgs[2], imgs[3]])
img = cv2.hconcat([upper_img, lower_img])
return img
length = len(TRAIN_DF)
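# build 3000 mosaics; each mosaic's label is the union of its four source labels ('healthy' only if all four are healthy)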
for count in range(3000):
imgs = []
new_labels = []
indexs = np.random.randint(0, length, size=4)
for index in indexs:
filepath = train_paths[index]
img = cv2.imread(filepath)
imgs.append(img)
labels = TRAIN_DF.iloc[index, 1]
for disease in labels.split(' '):
if disease == 'healthy' or disease in new_labels:
continue
new_labels.append(disease)
if not new_labels:
new_label = 'healthy'
else:
new_label = ' '.join(new_labels)
img_name = f'{count}.jpg'
new_img = concat_img(imgs)
new_file_path = f'{OUTPUT_DIR}/{train_dir}/{img_name}'
cv2.imwrite(new_file_path, new_img)
    EXTRA_TRAIN = pd.concat([EXTRA_TRAIN, pd.DataFrame([{'image': img_name, 'labels': new_label}])], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
EXTRA_TRAIN | code |
73095391/cell_1 | [
"text_plain_output_1.png"
] | !apt install zip | code |
73095391/cell_7 | [
"text_plain_output_1.png"
] | data_dir = "../input/plant-pathology-2021-fgvc8"
!ls {data_dir} | code |
73095391/cell_22 | [
"text_html_output_1.png"
] | !zip -r {OUTPUT_DIR}_resized.zip ./{OUTPUT_DIR}/* | code |
73095391/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
TRAIN_DF = pd.read_csv('../input/pp-csv/clearned_train.csv')
TRAIN_DF | code |
1007093/cell_9 | [
"image_output_1.png"
] | from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam ,RMSprop
from keras.utils import to_categorical
import numpy as np
import pandas as pd
train_file = pd.read_csv('../input/train.csv')
test_images = pd.read_csv('../input/test.csv')
train_images = train_file.iloc[:, 1:].values.astype('float32')
train_labels = train_file.iloc[:, 0].values.astype('int32')
train_images = train_images.reshape((42000, 28 * 28))
train_images = train_images / 255
test_images = test_images / 255
train_labels = to_categorical(train_labels)  # one-hot encode the 10 digit classes
num_classes = train_labels.shape[1]
seed = 43
np.random.seed(seed)
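# small MLP: three hidden ReLU layers with dropout, softmax over the 10 digit classes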
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=28 * 28))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, validation_split=0.05, epochs=25, batch_size=64)
1007093/cell_4 | [
"image_output_1.png"
] | import pandas as pd
train_file = pd.read_csv('../input/train.csv')
test_images = pd.read_csv('../input/test.csv')
train_images = train_file.iloc[:, 1:].values.astype('float32')
print(train_images.shape)
train_labels = train_file.iloc[:, 0].values.astype('int32')
print(train_labels.shape) | code |
1007093/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.optimizers import Adam, RMSprop
from sklearn.model_selection import train_test_split | code |
1007093/cell_15 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam ,RMSprop
from keras.utils import to_categorical
import numpy as np
import pandas as pd
train_file = pd.read_csv('../input/train.csv')
test_images = pd.read_csv('../input/test.csv')
train_images = train_file.iloc[:, 1:].values.astype('float32')
train_labels = train_file.iloc[:, 0].values.astype('int32')
train_images = train_images.reshape((42000, 28 * 28))
train_images = train_images / 255
test_images = test_images / 255
train_labels = to_categorical(train_labels)
num_classes = train_labels.shape[1]
seed = 43
np.random.seed(seed)
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=28 * 28))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, validation_split=0.05, epochs=25, batch_size=64)
history_dict = history.history
history_dict.keys() | code |
1007093/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
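# history_dict is history.history from the training cell above; the stored cell omits that chain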
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo')
plt.plot(epochs, val_loss_values, 'b+')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show() | code |
1007093/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
train_file = pd.read_csv('../input/train.csv')
print(train_file.shape)
test_images = pd.read_csv('../input/test.csv')
print(test_images.shape) | code |
1007093/cell_17 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam ,RMSprop
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_file = pd.read_csv('../input/train.csv')
test_images = pd.read_csv('../input/test.csv')
train_images = train_file.iloc[:, 1:].values.astype('float32')
train_labels = train_file.iloc[:, 0].values.astype('int32')
train_images = train_images.reshape((42000, 28 * 28))
train_images = train_images / 255
test_images = test_images / 255
train_labels = to_categorical(train_labels)
num_classes = train_labels.shape[1]
seed = 43
np.random.seed(seed)
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=28 * 28))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, validation_split=0.05, epochs=25, batch_size=64)
history_dict = history.history
history_dict.keys()
plt.clf()
acc_values = history_dict['accuracy']  # Keras >= 2.3 logs 'accuracy'/'val_accuracy' (older versions used 'acc'/'val_acc')
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(acc_values) + 1)  # x-axis: epoch numbers; recreated here so the cell runs standalone
plt.plot(epochs, acc_values, 'bo')
plt.plot(epochs, val_acc_values, 'b+')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show() | code |
1007093/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense, Dropout, Lambda, Flatten
from keras.models import Sequential
from keras.optimizers import Adam ,RMSprop
from keras.utils import to_categorical
import numpy as np
import pandas as pd
train_file = pd.read_csv('../input/train.csv')
test_images = pd.read_csv('../input/test.csv')
train_images = train_file.iloc[:, 1:].values.astype('float32')
train_labels = train_file.iloc[:, 0].values.astype('int32')
train_images = train_images.reshape((42000, 28 * 28))
train_images = train_images / 255
test_images = test_images / 255
train_labels = to_categorical(train_labels)
num_classes = train_labels.shape[1]
seed = 43
np.random.seed(seed)
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=28 * 28))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, validation_split=0.05, epochs=25, batch_size=64)
model.summary()  # summary() prints directly and returns None, so wrapping it in print() adds a stray 'None'
1008693/cell_25 | [
"text_html_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
employees = pd.read_csv('../input/HR_comma_sep.csv')  # assumed input: the stored cell uses `employees` without loading it; this is the standard Kaggle HR-analytics file
employees.shape
employees.mean(numeric_only=True)
correlation_matrix = employees.corr(numeric_only=True)  # numeric_only: salary/sales are still strings at this point
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
leave_result = employees['left']
y = np.where(leave_result == 1, 1, 0)
y
X = employees.drop('left', axis=1).to_numpy().astype(float)  # as_matrix() and np.float were removed from pandas/numpy
X
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
from sklearn.model_selection import KFold
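# 3-fold CV: fit on two folds, predict the held-out fold, and return out-of-fold predictions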
def run_cv(X, y, clf_class, **kwargs):
kf = KFold(n_splits=3, shuffle=True)
y_pred = y.copy()
for train_index, test_index in kf.split(X):
X_train, X_test = (X[train_index], X[test_index])
y_train = y[train_index]
clf = clf_class(**kwargs)
clf.fit(X_train, y_train)
y_pred[test_index] = clf.predict(X_test)
return y_pred
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LR
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.metrics import average_precision_score
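# accuracy = fraction of out-of-fold predictions matching the true label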
def accuracy(y_true, y_pred):
return np.mean(y_true == y_pred)
print('Logistic Regression:')
print('%.3f' % accuracy(y, run_cv(X, y, LR)))
print('Gradient Boosting Classifier')
print('%.3f' % accuracy(y, run_cv(X, y, GBC)))
print('Support vector machines:')
print('%.3f' % accuracy(y, run_cv(X, y, SVC)))
print('Random forest:')
print('%.3f' % accuracy(y, run_cv(X, y, RF)))
print('K-nearest-neighbors:')
print('%.3f' % accuracy(y, run_cv(X, y, KNN))) | code |
1008693/cell_4 | [
"text_plain_output_1.png"
import pandas as pd
employees = pd.read_csv('../input/HR_comma_sep.csv')  # assumed load; the stored cell references `employees` without defining it
employees.shape