path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 class)
106191525/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') print(data.shape) data.head()
code
106191525/cell_29
[ "image_output_1.png" ]
from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.neighbors import LocalOutlierFactor from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import time data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') C = data['diagnosis'].value_counts() corr = data.corr() top_feature = corr.index[abs(corr['diagnosis']) > 0.5] Important_Data = data[top_feature.values] top_corr = data[top_feature].corr() radius = data[['radius_mean', 'radius_se', 'radius_worst', 'diagnosis']] texture = data[['texture_mean', 'texture_se', 'texture_worst', 'diagnosis']] perimeter = data[['perimeter_mean', 'perimeter_se', 'perimeter_worst', 'diagnosis']] area = data[['area_mean', 'area_se', 'area_worst', 'diagnosis']] smoothness = data[['smoothness_mean', 'smoothness_se', 'smoothness_worst', 'diagnosis']] compactness = data[['compactness_mean', 'compactness_se', 'compactness_worst', 'diagnosis']] concavity = data[['concavity_mean', 'concavity_se', 'concavity_worst', 'diagnosis']] concave_points = data[['concave points_mean', 'concave points_se', 'concave points_worst', 'diagnosis']] symmetry = data[['symmetry_mean', 'symmetry_se', 'symmetry_worst', 'diagnosis']] fractal_dimension = data[['fractal_dimension_mean', 'fractal_dimension_se', 'fractal_dimension_worst', 'diagnosis']] X = Important_Data.drop(['diagnosis'], axis=1) Y = Important_Data.diagnosis columns = Important_Data.columns.tolist() lof = LocalOutlierFactor() y_pred = lof.fit_predict(X) y_pred[0:30] x_score = lof.negative_outlier_factor_ outlier_score = pd.DataFrame() outlier_score['score'] = x_score lofthreshold = -2.5 loffilter = outlier_score['score'] < lofthreshold outlier_index = outlier_score[loffilter].index.tolist() radius = (x_score.max() - x_score) / (x_score.max() - x_score.min()) outlier_score['radius'] = radius X = X.drop(outlier_index) Y = Y.drop(outlier_index).values x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=10) MM = StandardScaler() x_train = MM.fit_transform(x_train) x_test = MM.transform(x_test) ACC = [] TM = [] svc = SVC(kernel='linear', C=0.01) time1 = time.time() svc.fit(x_train, y_train) time2 = time.time() time_interval = time2 - time1 print(' Training Time :', time_interval, 'Seconds') y_predicted_svm = svc.predict(x_test) print('Testing accuracy :', metrics.accuracy_score(y_test, y_predicted_svm) * 100, ' %') y_predicted_svm1 = svc.predict(x_train) print('Training accuracy :', metrics.accuracy_score(y_train, y_predicted_svm1) * 100, ' %') ACC.append(metrics.accuracy_score(y_test, y_predicted_svm) * 100) TM.append(time_interval)
code
106191525/cell_26
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.neighbors import LocalOutlierFactor import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import seaborn as sns data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') C = data['diagnosis'].value_counts() corr = data.corr() top_feature = corr.index[abs(corr['diagnosis']) > 0.5] Important_Data = data[top_feature.values] top_corr = data[top_feature].corr() radius = data[['radius_mean', 'radius_se', 'radius_worst', 'diagnosis']] texture = data[['texture_mean', 'texture_se', 'texture_worst', 'diagnosis']] perimeter = data[['perimeter_mean', 'perimeter_se', 'perimeter_worst', 'diagnosis']] area = data[['area_mean', 'area_se', 'area_worst', 'diagnosis']] smoothness = data[['smoothness_mean', 'smoothness_se', 'smoothness_worst', 'diagnosis']] compactness = data[['compactness_mean', 'compactness_se', 'compactness_worst', 'diagnosis']] concavity = data[['concavity_mean', 'concavity_se', 'concavity_worst', 'diagnosis']] concave_points = data[['concave points_mean', 'concave points_se', 'concave points_worst', 'diagnosis']] symmetry = data[['symmetry_mean', 'symmetry_se', 'symmetry_worst', 'diagnosis']] fractal_dimension = data[['fractal_dimension_mean', 'fractal_dimension_se', 'fractal_dimension_worst', 'diagnosis']] X = Important_Data.drop(['diagnosis'], axis=1) Y = Important_Data.diagnosis columns = Important_Data.columns.tolist() lof = LocalOutlierFactor() y_pred = lof.fit_predict(X) y_pred[0:30] x_score = lof.negative_outlier_factor_ outlier_score = pd.DataFrame() outlier_score['score'] = x_score lofthreshold = -2.5 loffilter = outlier_score['score'] < lofthreshold outlier_index = outlier_score[loffilter].index.tolist() radius = (x_score.max() - x_score) / (x_score.max() - x_score.min()) outlier_score['radius'] = radius X = X.drop(outlier_index) Y = Y.drop(outlier_index).values Important_Data.isna().sum()
code
106191525/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') le = LabelEncoder() data['diagnosis'] = le.fit_transform(data['diagnosis']) data['diagnosis'].head()
code
106191525/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') data.info()
code
106191525/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import seaborn as sns data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') C = data['diagnosis'].value_counts() corr = data.corr() top_feature = corr.index[abs(corr['diagnosis']) > 0.5] Important_Data = data[top_feature.values] top_corr = data[top_feature].corr() print(Important_Data.shape) Important_Data.describe()
code
106191525/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import seaborn as sns data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv') C = data['diagnosis'].value_counts() corr = data.corr() top_feature = corr.index[abs(corr['diagnosis']) > 0.5] Important_Data = data[top_feature.values] top_corr = data[top_feature].corr() radius = data[['radius_mean', 'radius_se', 'radius_worst', 'diagnosis']] sns.pairplot(radius, hue='diagnosis', palette='husl', markers=['o', 's']) texture = data[['texture_mean', 'texture_se', 'texture_worst', 'diagnosis']] sns.pairplot(texture, hue='diagnosis', palette='Blues_d') perimeter = data[['perimeter_mean', 'perimeter_se', 'perimeter_worst', 'diagnosis']] sns.pairplot(perimeter, hue='diagnosis') area = data[['area_mean', 'area_se', 'area_worst', 'diagnosis']] sns.pairplot(area, hue='diagnosis') smoothness = data[['smoothness_mean', 'smoothness_se', 'smoothness_worst', 'diagnosis']] sns.pairplot(smoothness, hue='diagnosis') compactness = data[['compactness_mean', 'compactness_se', 'compactness_worst', 'diagnosis']] sns.pairplot(compactness, hue='diagnosis') concavity = data[['concavity_mean', 'concavity_se', 'concavity_worst', 'diagnosis']] sns.pairplot(concavity, hue='diagnosis') concave_points = data[['concave points_mean', 'concave points_se', 'concave points_worst', 'diagnosis']] sns.pairplot(concave_points, hue='diagnosis') symmetry = data[['symmetry_mean', 'symmetry_se', 'symmetry_worst', 'diagnosis']] sns.pairplot(symmetry, hue='diagnosis') fractal_dimension = data[['fractal_dimension_mean', 'fractal_dimension_se', 'fractal_dimension_worst', 'diagnosis']] sns.pairplot(fractal_dimension, hue='diagnosis')
code
128026526/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age age_pivot = titanic_train.pivot_table(index='Parch', values='Age', aggfunc=('mean', 'median')) age_pivot titanic_train.Age = titanic_train.groupby('Parch')['Age'].transform(lambda Age_grouped: Age_grouped.fillna(Age_grouped.median())) titanic_train.Cabin = titanic_train.Cabin.fillna('unknown') titanic_train[titanic_train.Embarked.isna()]
code
128026526/cell_4
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) titanic_test.head()
code
128026526/cell_2
[ "text_plain_output_1.png" ]
import os import pandas as pd import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
128026526/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) titanic_train.info()
code
128026526/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) titanic_train.describe()
code
128026526/cell_15
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age age_pivot = titanic_train.pivot_table(index='Parch', values='Age', aggfunc=('mean', 'median')) age_pivot
code
128026526/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age age_pivot = titanic_train.pivot_table(index='Parch', values='Age', aggfunc=('mean', 'median')) age_pivot titanic_train.Age = titanic_train.groupby('Parch')['Age'].transform(lambda Age_grouped: Age_grouped.fillna(Age_grouped.median())) print(f'Number of missing values in the "Age" column after filling - {titanic_train.Age.isna().sum()}')
code
128026526/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) titanic_train.head()
code
128026526/cell_17
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age age_pivot = titanic_train.pivot_table(index='Parch', values='Age', aggfunc=('mean', 'median')) age_pivot titanic_train.Age = titanic_train.groupby('Parch')['Age'].transform(lambda Age_grouped: Age_grouped.fillna(Age_grouped.median())) titanic_train.Cabin.value_counts(dropna=False).head(15)
code
128026526/cell_24
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age age_pivot = titanic_train.pivot_table(index='Parch', values='Age', aggfunc=('mean', 'median')) age_pivot titanic_train.Age = titanic_train.groupby('Parch')['Age'].transform(lambda Age_grouped: Age_grouped.fillna(Age_grouped.median())) titanic_train.Cabin = titanic_train.Cabin.fillna('unknown') titanic_train[titanic_train.Embarked.isna()] ticket_embarked = titanic_train[titanic_train.Embarked.isna()]['Ticket'].values[0] titanic_train.query('Ticket == @ticket_embarked') titanic_train.Embarked.value_counts()
code
128026526/cell_14
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age
code
128026526/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum() parch_missed_age = titanic_train[titanic_train.Age.isna()].Parch.value_counts() parch_missed_age age_pivot = titanic_train.pivot_table(index='Parch', values='Age', aggfunc=('mean', 'median')) age_pivot titanic_train.Age = titanic_train.groupby('Parch')['Age'].transform(lambda Age_grouped: Age_grouped.fillna(Age_grouped.median())) titanic_train.Cabin = titanic_train.Cabin.fillna('unknown') titanic_train[titanic_train.Embarked.isna()] ticket_embarked = titanic_train[titanic_train.Embarked.isna()]['Ticket'].values[0] titanic_train.query('Ticket == @ticket_embarked')
code
128026526/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) print(f'Number of duplicate rows in the training set - {titanic_train.duplicated().sum()}') print(f'Number of duplicate rows in the test set - {titanic_test.duplicated().sum()}') print() columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() print(f'Number of duplicates in column {column} - {duplicated_qty}')
code
128026526/cell_12
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) columns_to_look = ['PassengerId', 'Name', 'Ticket'] for column in columns_to_look: duplicated_qty = titanic_train[column].duplicated().sum() titanic_train.isna().sum()
code
128026526/cell_5
[ "text_html_output_1.png" ]
import pandas as pd titanic_train = pd.read_csv('/kaggle/input/titanic/train.csv') titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv') titanic_gender_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv').head(10) titanic_gender_submission.head()
code
322554/cell_21
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) class MultiColumnLabelEncoder: def __init__(self, columns=None): self.columns = columns def fit(self, X, y=None): return self def transform(self, X): output = X.copy() if self.columns is not None: for col in self.columns: output[col] = LabelEncoder().fit_transform(output[col]) else: for colname, col in output.iteritems(): output[colname] = LabelEncoder().fit_transform(col) return output def fit_transform(self, X, y=None): return self.fit(X, y).transform(X) def processor(data): data = MultiColumnLabelEncoder(columns=['people_id', 'activity_id', 'activity_category', 'date_x', 'char_1_x', 'char_2_x', 'char_3_x', 'char_4_x', 'char_5_x', 'char_6_x', 'char_7_x', 'char_8_x', 'char_9_x', 'char_10_x', 'char_1_y', 'group_1', 'char_2_y', 'date_y', 'char_3_y', 'char_4_y', 'char_5_y', 'char_6_y', 'char_7_y', 'char_8_y', 'char_9_y']).fit_transform(data) bool_map = {True: 1, False: 0} data = data.applymap(lambda x: bool_map.get(x, x)) return data df_encoded = processor(df) df_encoded.dtypes X = df_encoded y = X.pop('outcome') print(X.shape) print(y.shape)
code
322554/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) df_test.head()
code
322554/cell_23
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(77, n_jobs=-1, random_state=7) model.fit(X_train, y_train) print('model score ', model.score(X_test, y_test))
code
322554/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') print(df.shape) print(df_test.shape)
code
322554/cell_18
[ "text_html_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) def processor(data): data = MultiColumnLabelEncoder(columns=['people_id', 'activity_id', 'activity_category', 'date_x', 'char_1_x', 'char_2_x', 'char_3_x', 'char_4_x', 'char_5_x', 'char_6_x', 'char_7_x', 'char_8_x', 'char_9_x', 'char_10_x', 'char_1_y', 'group_1', 'char_2_y', 'date_y', 'char_3_y', 'char_4_y', 'char_5_y', 'char_6_y', 'char_7_y', 'char_8_y', 'char_9_y']).fit_transform(data) bool_map = {True: 1, False: 0} data = data.applymap(lambda x: bool_map.get(x, x)) return data df_test_encoded = processor(df_test) df_test_encoded.dtypes
code
322554/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) df.head()
code
322554/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) def processor(data): data = MultiColumnLabelEncoder(columns=['people_id', 'activity_id', 'activity_category', 'date_x', 'char_1_x', 'char_2_x', 'char_3_x', 'char_4_x', 'char_5_x', 'char_6_x', 'char_7_x', 'char_8_x', 'char_9_x', 'char_10_x', 'char_1_y', 'group_1', 'char_2_y', 'date_y', 'char_3_y', 'char_4_y', 'char_5_y', 'char_6_y', 'char_7_y', 'char_8_y', 'char_9_y']).fit_transform(data) bool_map = {True: 1, False: 0} data = data.applymap(lambda x: bool_map.get(x, x)) return data df_encoded = processor(df) df_encoded.dtypes
code
322554/cell_3
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd from IPython.display import display, HTML from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier
code
322554/cell_17
[ "text_html_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) def processor(data): data = MultiColumnLabelEncoder(columns=['people_id', 'activity_id', 'activity_category', 'date_x', 'char_1_x', 'char_2_x', 'char_3_x', 'char_4_x', 'char_5_x', 'char_6_x', 'char_7_x', 'char_8_x', 'char_9_x', 'char_10_x', 'char_1_y', 'group_1', 'char_2_y', 'date_y', 'char_3_y', 'char_4_y', 'char_5_y', 'char_6_y', 'char_7_y', 'char_8_y', 'char_9_y']).fit_transform(data) bool_map = {True: 1, False: 0} data = data.applymap(lambda x: bool_map.get(x, x)) return data df_test_encoded = processor(df_test) df_test_encoded.head()
code
322554/cell_24
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(77, n_jobs=-1, random_state=7) model.fit(X_train, y_train) pred = model.predict(X_test) pred
code
322554/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd people = pd.read_csv('../input/people.csv') activity_train = pd.read_csv('../input/act_train.csv') activity_test = pd.read_csv('../input/act_test.csv') df = activity_train.merge(people, how='left', on='people_id') df_test = activity_test.merge(people, how='left', on='people_id') df = df.fillna('0', axis=0) df_test = df_test.fillna('0', axis=0) def processor(data): data = MultiColumnLabelEncoder(columns=['people_id', 'activity_id', 'activity_category', 'date_x', 'char_1_x', 'char_2_x', 'char_3_x', 'char_4_x', 'char_5_x', 'char_6_x', 'char_7_x', 'char_8_x', 'char_9_x', 'char_10_x', 'char_1_y', 'group_1', 'char_2_y', 'date_y', 'char_3_y', 'char_4_y', 'char_5_y', 'char_6_y', 'char_7_y', 'char_8_y', 'char_9_y']).fit_transform(data) bool_map = {True: 1, False: 0} data = data.applymap(lambda x: bool_map.get(x, x)) return data df_encoded = processor(df) df_encoded.head()
code
2034706/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.plotly as py from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8')) foot = pd.read_csv('../input/epldata_final.csv')
code
2034706/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.plotly as py from subprocess import check_output foot = pd.read_csv('../input/epldata_final.csv') under_21_big6 = foot[np.logical_and(foot['age'] <= 21, foot['big_club'] == 1)] for lab, row in under_21_big6.iterrows(): print(row['name'] + ' play in ' + row['club'])
code
2034706/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.plotly as py from subprocess import check_output foot = pd.read_csv('../input/epldata_final.csv') pl_under = foot[foot['age'] <= 21] def count_u21(pl_under, *args): u21_dir = {} for col_name in args: col = pl_under[col_name] for entry in col: if entry in u21_dir.keys(): u21_dir[entry] += 1 else: u21_dir[entry] = 1 return u21_dir clubs_ = count_u21(pl_under, 'club') print(clubs_) plt.hist(clubs_) plt.xlabel('Clubs') plt.ylabel('No. of players') plt.title('U21 distribution - Clubs') plt.show()
code
90135317/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from bq_helper import BigQueryHelper from datetime import datetime from google.cloud import bigquery import numpy as np import pandas as pd from google.cloud import bigquery from bq_helper import BigQueryHelper client = bigquery.Client() query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n ' bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain') df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000) original = df.copy() from datetime import datetime df = original.copy() df = df.sort_values(by=['timestamp'], ascending=True) # Data Exploration # Convert timestamp to datetime. it will be easier to read (especially the time-delta's) ts_col = df['timestamp'].div(1000.0) df['timestamp'] = ts_col.apply(datetime.fromtimestamp) print(df.describe()) summary = df.diff().describe() print(summary) # From results, we see that the time range is 2009-01-09 -> 2018-09-10 # Also, the greatest block delay was 1 day and 4:47:00! # Note that 75th percentile of block times is only 00:12:54. Clearly block times of 02:00:00 are rare. df.diff().plot(kind='line') maxidx = df.idxmax() print(df['timestamp'][maxidx['timestamp']])
code
90135317/cell_6
[ "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper from datetime import datetime from datetime import datetime, timedelta from google.cloud import bigquery from scipy.stats import norm import numpy as np import pandas as pd from google.cloud import bigquery from bq_helper import BigQueryHelper client = bigquery.Client() query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n ' bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain') df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000) original = df.copy() from datetime import datetime df = original.copy() df = df.sort_values(by=['timestamp'], ascending=True) # Data Exploration # Convert timestamp to datetime. it will be easier to read (especially the time-delta's) ts_col = df['timestamp'].div(1000.0) df['timestamp'] = ts_col.apply(datetime.fromtimestamp) print(df.describe()) summary = df.diff().describe() print(summary) # From results, we see that the time range is 2009-01-09 -> 2018-09-10 # Also, the greatest block delay was 1 day and 4:47:00! # Note that 75th percentile of block times is only 00:12:54. Clearly block times of 02:00:00 are rare. maxidx = df.idxmax() from scipy.stats import norm from datetime import datetime, timedelta import numpy as np # More data processing df = df.diff() df = df.dropna() # since we have a column of diffs, the first entry will need to be ignored print(df.head()) print(df.describe()) print(df.dtypes) # convert timedelta type to a float (seconds) print(df['timestamp'][2].total_seconds()) df['timestamp'] = df['timestamp'].apply(lambda x: x.total_seconds()) float_summary = df.describe() print(float_summary) time_threshold = timedelta(hours=2).total_seconds() print(float_summary["timestamp"][1]) # mean print(float_summary["timestamp"][2]) # standard dev df_cdf = norm.cdf(time_threshold, float_summary['timestamp'][1], float_summary['timestamp'][2]) print((1 - df_cdf) * df.shape[0]) print(df.shape[0]) print(1 - df_cdf) print(df.timestamp.quantile(0.99)) print(len(df[df['timestamp'] > time_threshold]))
code
90135317/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper from google.cloud import bigquery import numpy as np import pandas as pd from google.cloud import bigquery from bq_helper import BigQueryHelper client = bigquery.Client() query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n ' bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain') df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000) print('Size of dataframe: {} Bytes'.format(int(df.memory_usage(index=True, deep=True).sum()))) df.head(10)
code
90135317/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper from datetime import datetime from google.cloud import bigquery import numpy as np import pandas as pd from google.cloud import bigquery from bq_helper import BigQueryHelper client = bigquery.Client() query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n ' bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain') df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000) original = df.copy() from datetime import datetime df = original.copy() df = df.sort_values(by=['timestamp'], ascending=True) ts_col = df['timestamp'].div(1000.0) df['timestamp'] = ts_col.apply(datetime.fromtimestamp) print(df.describe()) summary = df.diff().describe() print(summary)
code
90135317/cell_5
[ "text_plain_output_1.png" ]
from bq_helper import BigQueryHelper from datetime import datetime from datetime import datetime, timedelta from google.cloud import bigquery import numpy as np import pandas as pd from google.cloud import bigquery from bq_helper import BigQueryHelper client = bigquery.Client() query = '\n #standardSQL\n SELECT\n timestamp\n FROM \n `bigquery-public-data.bitcoin_blockchain.blocks`\n ORDER BY\n timestamp\n ' bq_assistant = BigQueryHelper('bigquery-public-data', 'bitcoin_blockchain') df = bq_assistant.query_to_pandas_safe(query, max_gb_scanned=1000) original = df.copy() from datetime import datetime df = original.copy() df = df.sort_values(by=['timestamp'], ascending=True) # Data Exploration # Convert timestamp to datetime. it will be easier to read (especially the time-delta's) ts_col = df['timestamp'].div(1000.0) df['timestamp'] = ts_col.apply(datetime.fromtimestamp) print(df.describe()) summary = df.diff().describe() print(summary) # From results, we see that the time range is 2009-01-09 -> 2018-09-10 # Also, the greatest block delay was 1 day and 4:47:00! # Note that 75th percentile of block times is only 00:12:54. Clearly block times of 02:00:00 are rare. maxidx = df.idxmax() from scipy.stats import norm from datetime import datetime, timedelta import numpy as np df = df.diff() df = df.dropna() print(df.head()) print(df.describe()) print(df.dtypes) print(df['timestamp'][2].total_seconds()) df['timestamp'] = df['timestamp'].apply(lambda x: x.total_seconds()) float_summary = df.describe() print(float_summary) time_threshold = timedelta(hours=2).total_seconds() print(float_summary['timestamp'][1]) print(float_summary['timestamp'][2])
code
324023/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv') data.columns.values
code
324023/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
324023/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv')
code
18129560/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
score = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE) print() print('ACCURACY:', score[1]) print('LOSS:', score[0])
code
18129560/cell_13
[ "text_html_output_1.png" ]
print('train w2v ....') w2v_model.train(documents, total_examples=len(documents), epochs=W2V_EPOCH) print('done')
code
18129560/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
documents = [_text.split() for _text in df_train.text] print('training tweets count', len(documents))
code
18129560/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from sklearn.preprocessing import LabelEncoder import numpy as np import numpy as np # linear algebra import time DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' documents = [_text.split() for _text in df_train.text] words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love') labels = df_train.target.unique().tolist() labels.append(NEUTRAL) encoder = LabelEncoder() encoder.fit(df_train.target.tolist()) y_train = encoder.transform(df_train.target.tolist()) y_test = encoder.transform(df_test.target.tolist()) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) embedding_matrix = np.zeros((vocab_size, W2V_SIZE)) for word, i in tokenizer.word_index.items(): if word in w2v_model.wv: embedding_matrix[i] = w2v_model.wv[word] embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False) model = Sequential() model.add(embedding_layer) model.add(Dropout(0.5)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) def decode_sentiment(score, include_neutral=True): if include_neutral: label = NEUTRAL if score <= SENTIMENT_THRESHOLDS[0]: label = NEGATIVE elif score >= SENTIMENT_THRESHOLDS[1]: label = POSITIVE return label else: return NEGATIVE if score < 0.5 else POSITIVE def predict(text, include_neutral=True): start_at = time.time() x_test = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH) score = model.predict([x_test])[0] label = decode_sentiment(score, include_neutral=include_neutral) return {'label': label, 'score': float(score), 'elapsed_time': time.time() - start_at} predict('I hate the rain')
code
18129560/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from sklearn.preprocessing import LabelEncoder import numpy as np import numpy as np # linear algebra import pickle import time DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' documents = [_text.split() for _text in df_train.text] words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love') labels = df_train.target.unique().tolist() labels.append(NEUTRAL) encoder = LabelEncoder() encoder.fit(df_train.target.tolist()) y_train = encoder.transform(df_train.target.tolist()) y_test = encoder.transform(df_test.target.tolist()) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) embedding_matrix = np.zeros((vocab_size, W2V_SIZE)) for word, i in tokenizer.word_index.items(): if word in w2v_model.wv: embedding_matrix[i] = w2v_model.wv[word] embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False) model = Sequential() model.add(embedding_layer) model.add(Dropout(0.5)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) def decode_sentiment(score, include_neutral=True): if include_neutral: label = NEUTRAL if score <= SENTIMENT_THRESHOLDS[0]: label = NEGATIVE elif score >= SENTIMENT_THRESHOLDS[1]: label = POSITIVE return label else: return NEGATIVE if score < 0.5 else POSITIVE def predict(text, include_neutral=True): start_at = time.time() x_test = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH) score = model.predict([x_test])[0] label = decode_sentiment(score, include_neutral=include_neutral) return {'label': label, 'score': float(score), 'elapsed_time': time.time() - start_at} model.save(KERAS_MODEL) w2v_model.save(WORD2VEC_MODEL) pickle.dump(tokenizer, open(TOKENIZER_MODEL, 'wb'), protocol=0) pickle.dump(encoder, open(ENCODER_MODEL, 'wb'), protocol=0)
code
18129560/cell_20
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
callbacks = [ReduceLROnPlateau(monitor='val_loss', patience=5, cooldown=0), EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=5)] history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.1, verbose=1, callbacks=callbacks)
code
18129560/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' """ Dataset details target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive) ids: The id of the tweet ( 2087) date: the date of the tweet (Sat May 16 23:58:44 UTC 2009) flag: The query (lyx). If there is no query, then this value is NO_QUERY. user: the user that tweeted (robotickilldozr) text: the text of the tweet (Lyx is cool) """ dataset_filename = os.listdir('../input')[0] dataset_path = os.path.join('..', 'input', dataset_filename) df = pd.read_csv(dataset_path, encoding=DATASET_ENCODING, names=DATASET_COLUMNS) df.head(5)
code
18129560/cell_29
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score accuracy_score(y_test_1d, y_pred_1d)
code
18129560/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from sklearn.preprocessing import LabelEncoder import numpy as np import numpy as np # linear algebra import time DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' documents = [_text.split() for _text in df_train.text] words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love') labels = df_train.target.unique().tolist() labels.append(NEUTRAL) encoder = LabelEncoder() encoder.fit(df_train.target.tolist()) y_train = encoder.transform(df_train.target.tolist()) y_test = encoder.transform(df_test.target.tolist()) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) embedding_matrix = np.zeros((vocab_size, W2V_SIZE)) for word, i in tokenizer.word_index.items(): if word in w2v_model.wv: embedding_matrix[i] = w2v_model.wv[word] embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False) model = Sequential() model.add(embedding_layer) model.add(Dropout(0.5)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) def decode_sentiment(score, include_neutral=True): if include_neutral: label = NEUTRAL if score <= SENTIMENT_THRESHOLDS[0]: label = NEGATIVE elif score >= SENTIMENT_THRESHOLDS[1]: label = POSITIVE return label else: return NEGATIVE if score < 0.5 else POSITIVE def predict(text, include_neutral=True): start_at = time.time() x_test = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH) score = model.predict([x_test])[0] label = decode_sentiment(score, include_neutral=include_neutral) return {'label': label, 'score': float(score), 'elapsed_time': time.time() - start_at} predict("i don't know what i'm doing")
code
18129560/cell_2
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix, classification_report, accuracy_score from sklearn.manifold import TSNE from sklearn.feature_extraction.text import TfidfVectorizer from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM from keras import utils from keras.callbacks import ReduceLROnPlateau, EarlyStopping import nltk from nltk.corpus import stopwords from nltk.stem import SnowballStemmer import gensim import re import numpy as np import os from collections import Counter import logging import time import pickle import itertools logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) print(os.listdir('../input'))
code
18129560/cell_11
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
w2v_model.build_vocab(documents)
code
18129560/cell_19
[ "text_plain_output_1.png" ]
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM from keras.models import Sequential import numpy as np import numpy as np # linear algebra DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love') embedding_matrix = np.zeros((vocab_size, W2V_SIZE)) for word, i in tokenizer.word_index.items(): if word in w2v_model.wv: embedding_matrix[i] = w2v_model.wv[word] embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False) model = Sequential() model.add(embedding_layer) model.add(Dropout(0.5)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
code
18129560/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
decode_map = {0: 'NEGATIVE', 2: 'NEUTRAL', 4: 'POSITIVE'} def decode_sentiment(label): return decode_map[int(label)] df.target = df.target.apply(lambda x: decode_sentiment(x))
code
18129560/cell_18
[ "text_plain_output_1.png" ]
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM import numpy as np import numpy as np # linear algebra DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love') embedding_matrix = np.zeros((vocab_size, W2V_SIZE)) for word, i in tokenizer.word_index.items(): if word in w2v_model.wv: embedding_matrix[i] = w2v_model.wv[word] print(embedding_matrix.shape) embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False)
code
18129560/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score print(classification_report(y_test_1d, y_pred_1d))
code
18129560/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' """ Dataset details target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive) ids: The id of the tweet ( 2087) date: the date of the tweet (Sat May 16 23:58:44 UTC 2009) flag: The query (lyx). If there is no query, then this value is NO_QUERY. user: the user that tweeted (robotickilldozr) text: the text of the tweet (Lyx is cool) """ dataset_filename = os.listdir('../input')[0] dataset_path = os.path.join('..', 'input', dataset_filename) df = pd.read_csv(dataset_path, encoding=DATASET_ENCODING, names=DATASET_COLUMNS) df_train, df_test = train_test_split(df, test_size=1 - TRAIN_SIZE, random_state=42) print('TRAIN size:', len(df_train)) print('TEST size:', len(df_test))
code
18129560/cell_15
[ "text_plain_output_1.png" ]
tokenizer = Tokenizer() tokenizer.fit_on_texts(df_train.text) vocab_size = len(tokenizer.word_index) + 1 print('Total words', vocab_size)
code
18129560/cell_16
[ "text_plain_output_1.png" ]
x_train = pad_sequences(tokenizer.texts_to_sequences(df_train.text), maxlen=SEQUENCE_LENGTH) x_test = pad_sequences(tokenizer.texts_to_sequences(df_test.text), maxlen=SEQUENCE_LENGTH)
code
18129560/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import nltk nltk.download('stopwords')
code
18129560/cell_17
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' documents = [_text.split() for _text in df_train.text] labels = df_train.target.unique().tolist() labels.append(NEUTRAL) encoder = LabelEncoder() encoder.fit(df_train.target.tolist()) y_train = encoder.transform(df_train.target.tolist()) y_test = encoder.transform(df_test.target.tolist()) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) print('x_train', x_train.shape) print('y_train', y_train.shape) print() print('x_test', x_test.shape) print('y_test', y_test.shape)
code
18129560/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from sklearn.preprocessing import LabelEncoder import numpy as np import numpy as np # linear algebra import time DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' documents = [_text.split() for _text in df_train.text] words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love') labels = df_train.target.unique().tolist() labels.append(NEUTRAL) encoder = LabelEncoder() encoder.fit(df_train.target.tolist()) y_train = encoder.transform(df_train.target.tolist()) y_test = encoder.transform(df_test.target.tolist()) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) embedding_matrix = np.zeros((vocab_size, W2V_SIZE)) for word, i in tokenizer.word_index.items(): if word in w2v_model.wv: embedding_matrix[i] = w2v_model.wv[word] embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], input_length=SEQUENCE_LENGTH, trainable=False) model = Sequential() model.add(embedding_layer) model.add(Dropout(0.5)) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) def decode_sentiment(score, include_neutral=True): if include_neutral: label = NEUTRAL if score <= SENTIMENT_THRESHOLDS[0]: label = NEGATIVE elif score >= SENTIMENT_THRESHOLDS[1]: label = POSITIVE return label else: return NEGATIVE if score < 0.5 else POSITIVE def predict(text, include_neutral=True): start_at = time.time() x_test = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=SEQUENCE_LENGTH) score = model.predict([x_test])[0] label = decode_sentiment(score, include_neutral=include_neutral) return {'label': label, 'score': float(score), 'elapsed_time': time.time() - start_at} predict('I love the music')
code
18129560/cell_14
[ "text_plain_output_1.png" ]
words = w2v_model.wv.vocab.keys() vocab_size = len(words) w2v_model.most_similar('love')
code
18129560/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'b', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'b', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show()
code
18129560/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
w2v_model = gensim.models.word2vec.Word2Vec(size=W2V_SIZE, window=W2V_WINDOW, min_count=W2V_MIN_COUNT, workers=8)
code
18129560/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
y_pred_1d = [] y_test_1d = list(df_test.target) scores = model.predict(x_test, verbose=1, batch_size=8000) y_pred_1d = [decode_sentiment(score, include_neutral=False) for score in scores] def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title, fontsize=30) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90, fontsize=22) plt.yticks(tick_marks, classes, fontsize=22) fmt = '.2f' thresh = cm.max() / 2.0 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black') plt.ylabel('True label', fontsize=25) plt.xlabel('Predicted label', fontsize=25) cnf_matrix = confusion_matrix(y_test_1d, y_pred_1d) plt.figure(figsize=(12, 12)) plot_confusion_matrix(cnf_matrix, classes=df_train.target.unique(), title='Confusion matrix') plt.show()
code
18129560/cell_12
[ "text_plain_output_1.png" ]
words = w2v_model.wv.vocab.keys() vocab_size = len(words) print('Vocab size', vocab_size)
code
18129560/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) DATASET_COLUMNS = ['target', 'ids', 'date', 'flag', 'user', 'text'] DATASET_ENCODING = 'ISO-8859-1' TRAIN_SIZE = 0.8 TEXT_CLEANING_RE = '@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+' W2V_SIZE = 300 W2V_WINDOW = 7 W2V_EPOCH = 32 W2V_MIN_COUNT = 10 SEQUENCE_LENGTH = 300 EPOCHS = 8 BATCH_SIZE = 1024 POSITIVE = 'POSITIVE' NEGATIVE = 'NEGATIVE' NEUTRAL = 'NEUTRAL' SENTIMENT_THRESHOLDS = (0.4, 0.7) KERAS_MODEL = 'model.h5' WORD2VEC_MODEL = 'model.w2v' TOKENIZER_MODEL = 'tokenizer.pkl' ENCODER_MODEL = 'encoder.pkl' """ Dataset details target: the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive) ids: The id of the tweet ( 2087) date: the date of the tweet (Sat May 16 23:58:44 UTC 2009) flag: The query (lyx). If there is no query, then this value is NO_QUERY. user: the user that tweeted (robotickilldozr) text: the text of the tweet (Lyx is cool) """ dataset_filename = os.listdir('../input')[0] dataset_path = os.path.join('..', 'input', dataset_filename) print('Open file:', dataset_path) df = pd.read_csv(dataset_path, encoding=DATASET_ENCODING, names=DATASET_COLUMNS) print('Dataset size:', len(df))
code
106211916/cell_21
[ "text_html_output_1.png" ]
from sklearn import preprocessing import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/titanic/train.csv').copy() test = pd.read_csv('../input/titanic/test.csv').copy() train.drop('Name', axis=1, inplace=True) test.drop('Name', axis=1, inplace=True) train.drop('Ticket', axis=1, inplace=True) test.drop('Ticket', axis=1, inplace=True) train.isnull().sum() train.drop('Cabin', axis=1, inplace=True) test.drop('Cabin', axis=1, inplace=True) scaler = preprocessing.StandardScaler() scaler.fit(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]) X_train = scaler.transform(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]) X_test = scaler.transform(test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]) X_test
code
106211916/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
code
106211916/cell_25
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
scaler = preprocessing.StandardScaler()
scaler.fit(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
X_train = scaler.transform(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
from sklearn.linear_model import LogisticRegression
algo = LogisticRegression(C=90000, solver='sag', max_iter=15000)
algo.fit(X_train, train['Survived'])
algo.score(X_train, train['Survived'])
code
106211916/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train['Embarked'].value_counts()
code
106211916/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106211916/cell_18
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
scaler = preprocessing.StandardScaler()
scaler.fit(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
code
106211916/cell_28
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
scaler = preprocessing.StandardScaler()
scaler.fit(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
X_train = scaler.transform(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
X_test = scaler.transform(test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
from sklearn.linear_model import LogisticRegression
algo = LogisticRegression(C=90000, solver='sag', max_iter=15000)
algo.fit(X_train, train['Survived'])
algo.score(X_train, train['Survived'])
ans = algo.predict(X_test)
id1 = list(test['PassengerId'])
type(id1)
data = []
for i in range(len(ans)):
    temp = {}
    temp['PassengerId'] = id1[i]
    temp['Survived'] = ans[i]
    data.append(temp)
data
code
106211916/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
test
code
106211916/cell_24
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
scaler = preprocessing.StandardScaler()
scaler.fit(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
X_train = scaler.transform(train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']])
from sklearn.linear_model import LogisticRegression
algo = LogisticRegression(C=90000, solver='sag', max_iter=15000)
algo.fit(X_train, train['Survived'])
code
106211916/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train
code
106211916/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv').copy()
test = pd.read_csv('../input/titanic/test.csv').copy()
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train.isnull().sum()
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
id1 = list(test['PassengerId'])
type(id1)
code
129035406/cell_4
[ "text_plain_output_1.png" ]
!python3 /content/OIDv4_ToolKit/main.py downloader --classes Car --type_csv train --limit 100 --multiclasses 1 -y
!python3 /content/OIDv4_ToolKit/main.py downloader --classes Car --type_csv validation --limit 30 --multiclasses 1 -y
!python3 /content/OIDv4_ToolKit/main.py downloader --classes Car --type_csv test --limit 30 --multiclasses 1 -y
code
129035406/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
file_path = '/content/OIDv4_ToolKit/classes.txt'
with open(file_path, mode='w') as f:
    f.write('Car')
print(f'File {file_path} has been updated.')
code
129035406/cell_2
[ "text_plain_output_1.png" ]
!git clone https://github.com/EscVM/OIDv4_ToolKit.git
code
129035406/cell_3
[ "text_plain_output_1.png" ]
!pip3 install -r /content/OIDv4_ToolKit/requirements.txt
code
2019859/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.isnull().sum(axis=0)
def replace_titles(x):
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'Male':
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title
train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
train = train.drop(['Name', 'PassengerId', 'Survived', 'Ticket', 'Cabin'], axis=1)
test = test.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
corrmat = train.corr()
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(corrmat, vmax=0.8, square=True)
code
2019859/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.isnull().sum(axis=0)
code
2019859/cell_6
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test['Sex'].value_counts()
code
2019859/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.isnull().sum(axis=0)
def replace_titles(x):
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'Male':
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title
train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
train = train.drop(['Name', 'PassengerId', 'Survived', 'Ticket', 'Cabin'], axis=1)
test = test.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
#correlation matrix
corrmat = train.corr()
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(corrmat, vmax=.8, square=True);
train.head()
code
2019859/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier, ExtraTreesClassifier, AdaBoostClassifier, BaggingClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
from scipy import stats
from scipy.stats import norm, skew
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2019859/cell_7
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.isnull().sum(axis=0)
train['Survived'].plot(kind='hist')
code
2019859/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.head()
code
2019859/cell_31
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier, ExtraTreesClassifier, AdaBoostClassifier, BaggingClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC,SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import xgboost as xgb
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.isnull().sum(axis=0)
test_id = test['PassengerId']
target = train['Survived']
def replace_titles(x):
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'Male':
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title
train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
train = train.drop(['Name', 'PassengerId', 'Survived', 'Ticket', 'Cabin'], axis=1)
test = test.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
#correlation matrix
corrmat = train.corr()
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(corrmat, vmax=.8, square=True);
vote_est = [('ada', AdaBoostClassifier()), ('bc', BaggingClassifier()), ('etc', ExtraTreesClassifier()), ('gbc', GradientBoostingClassifier()), ('rfc', RandomForestClassifier()), ('gpc', GaussianProcessClassifier()), ('lr', LogisticRegressionCV()), ('bnb', BernoulliNB()), ('gnb', GaussianNB()), ('knn', KNeighborsClassifier()), ('svc', SVC(probability=True)), ('xgb', xgb.XGBClassifier())]
model = VotingClassifier(estimators=vote_est, voting='hard')
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import accuracy_score, make_scorer
print(cross_val_score(model, train, target, cv=5, scoring=make_scorer(accuracy_score)))
code
2019859/cell_5
[ "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train.isnull().sum(axis=0)
sns.distplot(train['Fare'])
code
32068245/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
data.info()
code
32068245/cell_20
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
from sklearn.linear_model import LinearRegression
linear_reg = LinearRegression()
x = data.deneyim.values.reshape(-1, 1)
y = data.maas.values.reshape(-1, 1)
linear_reg.fit(x, y)
import numpy as np
a = linear_reg.predict([[0]])
a_ = linear_reg.intercept_
b = linear_reg.coef_
maas_yeni = 1663 + 1138 * 11
print(maas_yeni)
print(linear_reg.predict([[11]]))
code
32068245/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068245/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
code
32068245/cell_18
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
from sklearn.linear_model import LinearRegression
linear_reg = LinearRegression()
x = data.deneyim.values.reshape(-1, 1)
y = data.maas.values.reshape(-1, 1)
linear_reg.fit(x, y)
import numpy as np
a = linear_reg.predict([[0]])
a_ = linear_reg.intercept_
b = linear_reg.coef_
print('b: ', b)
code