path: string (13 to 17 characters)
screenshot_names: sequence (1 to 873 items)
code: string (0 to 40.4k characters)
cell_type: string (1 class)
50240297/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex1 = main_df[['Survived', 'Sex']]
main_df_sex1 = main_df_sex1.value_counts().to_frame()
main_df_sex1.reset_index(drop=False, inplace=True)
main_df_sex1.rename(columns={0: 'Counts'}, inplace=True)
main_df_sex1['Survived'] = main_df_sex1['Survived'].replace([0, 1], ['Not-Survived', 'Survived'])
main_df_sex1.set_index(['Survived', 'Sex'], drop=True, inplace=True)
main_df_sex1
code
50240297/cell_28
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
working_df = main_df
working_df.drop(['Name', 'Cabin', 'Ticket'], inplace=True, axis=1)
working_class_df = working_df[['Pclass', 'Survived']]
working_class_df
working_class_df_plot = working_class_df.groupby(['Pclass', 'Survived'])['Pclass'].count().to_frame()
working_class_df_plot.rename(columns={'Pclass': 'Count'}, inplace=True)
working_class_df_plot
code
50240297/cell_8
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
mean_age = main_df['Age'].mean()
main_df['Age'].replace(np.NaN, mean_age, inplace=True)
print('Column : Age count : ' + str(main_df['Age'].isnull().sum()))
code
50240297/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex = main_df['Sex'].value_counts()
main_df_sex
main_df_sex = main_df[['Survived', 'Sex']]
main_df_sex_factor = pd.get_dummies(main_df_sex['Sex'])
main_df_sex_factor
main_df_sex['female'] = main_df_sex_factor['female']
main_df_sex['male'] = main_df_sex_factor['male']
main_df_sex.head()
code
50240297/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df.head()
code
50240297/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex = main_df['Sex'].value_counts()
main_df_sex
main_df_sex = main_df[['Survived', 'Sex']]
main_df_sex_factor = pd.get_dummies(main_df_sex['Sex'])
main_df_sex_factor
main_df_sex['female'] = main_df_sex_factor['female']
main_df_sex['male'] = main_df_sex_factor['male']
main_df_sex = main_df[['Survived', 'Sex']]
main_df_sex = pd.concat([main_df_sex, pd.get_dummies(main_df['Sex'])], axis=1)
test_df = main_df_sex.groupby(['Survived', 'female', 'male'], as_index=False).count()
test_df
code
50240297/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
working_df = main_df
working_df.drop(['Name', 'Cabin', 'Ticket'], inplace=True, axis=1)
working_class_df = working_df[['Pclass', 'Survived']]
working_class_df
working_class_df_plot = working_class_df.groupby(['Pclass', 'Survived'])['Pclass'].count().to_frame()
working_class_df_plot.rename(columns={'Pclass': 'Count'}, inplace=True)
working_class_df_plot
working_class_df_plot.reset_index(inplace=True)
working_class_df_plot.plot(kind='bar', figsize=(10, 6), color='darkblue')
plt.title('Effect of field Pclass')
plt.xlabel('Pclass')
plt.ylabel('Number of People')
plt.show()
code
50240297/cell_24
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df
code
50240297/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex = main_df['Sex'].value_counts()
main_df_sex
main_df_sex = main_df[['Survived', 'Sex']]
main_df_sex_factor = pd.get_dummies(main_df_sex['Sex'])
main_df_sex_factor
code
50240297/cell_22
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex1 = main_df[['Survived', 'Sex']]
main_df_sex1 = main_df_sex1.value_counts().to_frame()
main_df_sex1.reset_index(drop=False, inplace=True)
main_df_sex1.rename(columns={0: 'Counts'}, inplace=True)
main_df_sex1['Survived'] = main_df_sex1['Survived'].replace([0, 1], ['Not-Survived', 'Survived'])
main_df_sex1.set_index(['Survived', 'Sex'], drop=True, inplace=True)
main_df_sex1
main_df_sex1.reset_index(drop=False, inplace=True)
for i in main_df_sex1.index:
    if main_df_sex1.iloc[i]['Sex'] == 'male':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 577 * 100
    if main_df_sex1.iloc[i]['Sex'] == 'female':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 312 * 100
main_df_sex1
male_stats = main_df_sex1[main_df_sex1['Sex'] == 'male']
male_stats.drop(['Counts'], axis=1, inplace=True)
male_stats.set_index(['Survived', 'Sex'], drop=True, inplace=True)
female_stats = main_df_sex1[main_df_sex1['Sex'] == 'female']
female_stats.drop(['Counts'], axis=1, inplace=True)
female_stats.set_index(['Survived', 'Sex'], drop=True, inplace=True)
female_stats
code
50240297/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
working_df = main_df
working_df.drop(['Name', 'Cabin', 'Ticket'], inplace=True, axis=1)
working_class_df = working_df[['Pclass', 'Survived']]
working_class_df
code
50240297/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
print('Column : Embarked count : ' + str(main_df['Embarked'].isnull().sum()))
code
50240297/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
gender_sub_df.info()
code
320748/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
numClasses = 10
numEig = 28 * 28
picSize = 28 * 28
trainData = pd.read_csv('../input/train.csv')
testData = pd.read_csv('../input/test.csv')
trainData.sort_values(by=['label'], inplace=True)
trainY = trainData.iloc[:, 0].values
trainX = trainData.iloc[:, 1:].values
testX = testData.iloc[:, :].values
trainMean = np.mean(trainX, axis=0)
trainX = trainX - trainMean
cov = np.cov(trainX.T)
w, v = np.linalg.eig(cov)
ws = np.sort(w)
ws = ws[::-1]
for i in range(0, numEig):
    v[:, i] = v[:, np.where(w == ws[i])[0][0]]
v = v[:, :numEig].real
del trainData, testData, cov, w, ws
omega = np.zeros((numClasses, numEig, picSize))
for i in range(0, numClasses):
    trainDigit = trainX[np.where(trainY == i)]
    print('calculating weights for digit %d, samples %d' % (i, len(trainDigit)))
    for k in range(0, len(trainDigit)):
        tmp = v.T * trainDigit[k]
        omega[i] += tmp
    omega[i] /= len(trainDigit)
code
320748/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
numClasses = 10
numEig = 28 * 28
picSize = 28 * 28
trainData = pd.read_csv('../input/train.csv')
testData = pd.read_csv('../input/test.csv')
trainData.sort_values(by=['label'], inplace=True)
trainY = trainData.iloc[:, 0].values
trainX = trainData.iloc[:, 1:].values
testX = testData.iloc[:, :].values
trainMean = np.mean(trainX, axis=0)
trainX = trainX - trainMean
cov = np.cov(trainX.T)
w, v = np.linalg.eig(cov)
ws = np.sort(w)
ws = ws[::-1]
for i in range(0, numEig):
    v[:, i] = v[:, np.where(w == ws[i])[0][0]]
v = v[:, :numEig].real
del trainData, testData, cov, w, ws
omega = np.zeros((numClasses, numEig, picSize))
for i in range(0, numClasses):
    trainDigit = trainX[np.where(trainY == i)]
    for k in range(0, len(trainDigit)):
        tmp = v.T * trainDigit[k]
        omega[i] += tmp
    omega[i] /= len(trainDigit)
orig = testX[np.random.randint(0, len(testX))]
omega_m = v.T * (orig - trainMean)
dist = np.zeros(numClasses)
for i in range(0, numClasses):
    dist[i] = np.linalg.norm(omega[i] - omega_m)
i = dist.argmin()
recon = v.T * omega_m
recon = np.sum(recon, axis=0) + trainMean
match = v.T * omega[i]
match = np.sum(match, axis=0) + trainMean
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)
ax1.imshow(orig.reshape(28, 28), cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('testX', fontsize=10)
ax2.imshow(recon.reshape(28, 28), cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title('reconstruct', fontsize=10)
ax3.imshow(match.reshape(28, 28), cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title('match', fontsize=10)
plt.show()
plt.close()
code
33105482/cell_13
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
df = df.select_dtypes(exclude=['object'])
df
df.isnull().sum()
df.target
code
33105482/cell_9
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
code
33105482/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
train.head()
code
33105482/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test.head()
code
33105482/cell_19
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
df = df.select_dtypes(exclude=['object'])
df
zero_not_accepted = ['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
for columns in zero_not_accepted:
    df[columns] = df[columns].replace(0, np.NaN)
    mean = int(df[columns].mean(skipna=True))
    df[columns] = df[columns].replace(np.NaN, mean)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.fit_transform(X_test)
y_train.notna()
classifier = KNeighborsClassifier(n_neighbors=97, p=2, metric='euclidean')
classifier.fit(X_train.loc[X_train['target'].notna()][zero_not_accepted], y_train[y_train.notna()])
code
33105482/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33105482/cell_7
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.head()
code
33105482/cell_18
[ "text_plain_output_1.png" ]
y_train.notna()
code
33105482/cell_8
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.describe()
code
33105482/cell_15
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
df = df.select_dtypes(exclude=['object'])
df
df.isnull().sum()
df.target
X = df.iloc[:, 0:8]
y = df.iloc[:, 7]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2)
y.shape
code
33105482/cell_17
[ "text_html_output_1.png" ]
import math
import math
math.sqrt(len(y_test))
code
33105482/cell_24
[ "text_plain_output_1.png" ]
!head /kaggle/working/submit.csv
code
33105482/cell_22
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
df = df.select_dtypes(exclude=['object'])
df
zero_not_accepted = ['age', 'fnlwgt', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
for columns in zero_not_accepted:
    df[columns] = df[columns].replace(0, np.NaN)
    mean = int(df[columns].mean(skipna=True))
    df[columns] = df[columns].replace(np.NaN, mean)
df.isnull().sum()
df.target
X = df.iloc[:, 0:8]
y = df.iloc[:, 7]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.fit_transform(X_test)
y_train.notna()
classifier = KNeighborsClassifier(n_neighbors=97, p=2, metric='euclidean')
classifier.fit(X_train.loc[X_train['target'].notna()][zero_not_accepted], y_train[y_train.notna()])
y_predict = classifier.predict_proba(df.loc[df['target'].isna()][zero_not_accepted])
df_submit = pd.DataFrame({'uid': df.loc[df['target'].isna()]['uid'], 'target': y_predict[:, 1]})
df_submit
code
33105482/cell_10
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
df = df.select_dtypes(exclude=['object'])
df
code
33105482/cell_12
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
test['target'] = np.nan
df = pd.concat([train, test])
df.dtypes
df = df.select_dtypes(exclude=['object'])
df
df.isnull().sum()
code
33105482/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/train.csv')
test = pd.read_csv('/kaggle/input/ods-mlclass-dubai-2019-03-lecture3-hw/test.csv')
print(train.shape)
print(test.shape)
code
2024900/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
""" Converting numbers into words TO DO """
code
2024900/cell_30
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) """ Remove blancs """ def remove_blanc(tokens): tokens = [token.strip() for token in tokens] return tokens tokens = remove_blanc(tokens) """ Removing accent marks and other diacritics """ def remove_accent(tokens): tokens = [unidecode.unidecode(token) for token in tokens] return tokens tokens = remove_accent(tokens) print('After removing accent markes ', tokens)
code
2024900/cell_33
[ "text_plain_output_1.png" ]
from collections import Counter from collections import Counter from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk import nltk import re import re import string text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) """ Remove blancs """ def remove_blanc(tokens): tokens = [token.strip() for token in tokens] return tokens tokens = remove_blanc(tokens) def remove_before_token(sentence, keep_apostrophe=False): sentence = sentence.strip() if keep_apostrophe: PATTERN = '[?|$|&|*|%|@|(|)|~]' filtered_sentence = re.sub(PATTERN, ' ', sentence) else: PATTERN = '[^a-zA-Z0-9]' filtered_sentence = re.sub(PATTERN, ' ', sentence) return filtered_sentence remove_before_token(text1) def remove_after_token(tokens): pattern = re.compile('[{}]'.format(re.escape(string.punctuation))) filtered_tokens = filter(None, [pattern.sub('', token) for token in tokens]) filtered_text = ' '.join(filtered_tokens) return filtered_text remove_special_characters(tokens) """ Expanding contraction """ CONTRACTION_MAP = {"ain't": 'is not', "aren't": 'are not', "can't": 'cannot', "can't've": 'cannot have', "'cause": 'because', "could've": 'could have', "couldn't": 'could not', "couldn't've": 'could not have', "didn't": 'did not', "doesn't": 'does not', "don't": 'do not', "hadn't": 'had not', "hadn't've": 'had not have', "hasn't": 'has not', "haven't": 'have not', "he'd": 'he would', "he'd've": 'he would have', "he'll": 'he will', "he'll've": 'he he will have', "he's": 'he is', "how'd": 'how did', "how'd'y": 'how do you', "how'll": 'how will', "how's": 'how is', "I'd": 'I would', "I'd've": 'I would have', "I'll": 'I will', "I'll've": 'I will have', "I'm": 'I am', "I've": 'I have', "i'd": 'i would', "i'd've": 'i would have', "i'll": 'i will', "i'll've": 'i will have', "i'm": 'i am', "i've": 'i have', "isn't": 'is not', "it'd": 'it would', "it'd've": 'it would have', "it'll": 'it will', "it'll've": 'it 
will have', "it's": 'it is', "let's": 'let us', "ma'am": 'madam', "mayn't": 'may not', "might've": 'might have', "mightn't": 'might not', "mightn't've": 'might not have', "must've": 'must have', "mustn't": 'must not', "mustn't've": 'must not have', "needn't": 'need not', "needn't've": 'need not have', "o'clock": 'of the clock', "oughtn't": 'ought not', "oughtn't've": 'ought not have', "shan't": 'shall not', "sha'n't": 'shall not', "shan't've": 'shall not have', "she'd": 'she would', "she'd've": 'she would have', "she'll": 'she will', "she'll've": 'she will have', "she's": 'she is', "should've": 'should have', "shouldn't": 'should not', "shouldn't've": 'should not have', "so've": 'so have', "so's": 'so as', "this's": 'this is', "that'd": 'that would', "that'd've": 'that would have', "that's": 'that is', "there'd": 'there would', "there'd've": 'there would have', "there's": 'there is', "they'd": 'they would', "they'd've": 'they would have', "they'll": 'they will', "they'll've": 'they will have', "they're": 'they are', "they've": 'they have', "to've": 'to have', "wasn't": 'was not', "we'd": 'we would', "we'd've": 'we would have', "we'll": 'we will', "we'll've": 'we will have', "we're": 'we are', "we've": 'we have', "weren't": 'were not', "what'll": 'what will', "what'll've": 'what will have', "what're": 'what are', "what's": 'what is', "what've": 'what have', "when's": 'when is', "when've": 'when have', "where'd": 'where did', "where's": 'where is', "where've": 'where have', "who'll": 'who will', "who'll've": 'who will have', "who's": 'who is', "who've": 'who have', "why's": 'why is', "why've": 'why have', "will've": 'will have', "won't": 'will not', "won't've": 'will not have', "would've": 'would have', "wouldn't": 'would not', "wouldn't've": 'would not have', "y'all": 'you all', "y'all'd": 'you all would', "y'all'd've": 'you all would have', "y'all're": 'you all are', "y'all've": 'you all have', "you'd": 'you would', "you'd've": 'you would have', "you'll": 'you will', "you'll've": 'you will have', "you're": 'you are', "you've": 'you have'} def expand_contractions(sentence, contraction_mapping): contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())), flags=re.IGNORECASE | re.DOTALL) def expand_match(contraction): match = contraction.group(0) first_char = match[0] expanded_contraction = contraction_mapping.get(match) if contraction_mapping.get(match) else contraction_mapping.get(match.lower()) expanded_contraction = first_char + expanded_contraction[1:] return expanded_contraction expanded_sentence = contractions_pattern.sub(expand_match, sentence) return expanded_sentence expanded_corpus = [expand_contractions(txt, CONTRACTION_MAP) for txt in sent_tokenize(text1)] """ Method 2 : Peter Norvig sur un seul mot """ import re import nltk from collections import Counter def words(text): return re.findall('\\w+', text.lower()) WORDS = Counter(words(open('../input/big.txt').read())) def P(word, N=sum(WORDS.values())): """Probability of `word`.""" return WORDS[word] / N def correction(word): """Most probable spelling correction for word.""" return max(candidates(word), key=P) def candidates(word): """Generate possible spelling corrections for word.""" return known([word]) or known(edits1(word)) or known(edits2(word)) or [word] def known(words): """The subset of `words` that appear in the dictionary of WORDS.""" return set((w for w in words if w in WORDS)) def edits1(word): """All edits that are one edit away from `word`.""" letters = 'abcdefghijklmnopqrstuvwxyz' splits 
= [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [L + R[1:] for L, R in splits if R] transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] replaces = [L + c + R[1:] for L, R in splits if R for c in letters] inserts = [L + c + R for L, R in splits for c in letters] return set(deletes + transposes + replaces + inserts) def edits2(word): """All edits that are two edits away from `word`.""" return (e2 for e1 in edits1(word) for e2 in edits1(e1)) correction('speling') correction('fial') correction('misstkaes')
code
2024900/cell_20
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) """ Remove blancs """ def remove_blanc(tokens): tokens = [token.strip() for token in tokens] return tokens tokens = remove_blanc(tokens) print(tokens)
code
2024900/cell_11
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ print('With a naive split \n', text1.split(' ')) '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) print('\nTokenizing text into words With NLTK \n', tokens) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() print('\nEquivalent method with TreebankWordTokenizer \n', tokenizer.tokenize(text1)) '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() print('\nEquivalent method with WordPunctTokenizer \n', tokenizer.tokenize(text1))
code
2024900/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
'\nImporting the libraries\n'
import re
import string
import numpy as np
import nltk
from collections import Counter
code
2024900/cell_7
[ "text_plain_output_1.png" ]
from nltk.tokenize import sent_tokenize
german_text = u'Die Orgellandschaft Südniedersachsen umfasst das Gebiet der Landkreise Goslar, Göttingen, Hameln-Pyrmont, Hildesheim, Holzminden, Northeim und Osterode am Harz sowie die Stadt Salzgitter. Über 70 historische Orgeln vom 17. bis 19. Jahrhundert sind in der südniedersächsischen Orgellandschaft vollständig oder in Teilen erhalten. '
print('\n', sent_tokenize(german_text, language='german'))
print('\n', sent_tokenize(german_text, language='polish'))
code
2024900/cell_18
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) print(tokens)
code
2024900/cell_32
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import brown
"""
Method 1 : Using the brown corpus in NLTK and "in" operator
"""
from nltk.corpus import brown
word_list = brown.words()
len(word_list)
word_set = set(word_list)
'looked' in word_set
code
2024900/cell_28
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk import re import string text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) """ Remove blancs """ def remove_blanc(tokens): tokens = [token.strip() for token in tokens] return tokens tokens = remove_blanc(tokens) def remove_before_token(sentence, keep_apostrophe=False): sentence = sentence.strip() if keep_apostrophe: PATTERN = '[?|$|&|*|%|@|(|)|~]' filtered_sentence = re.sub(PATTERN, ' ', sentence) else: PATTERN = '[^a-zA-Z0-9]' filtered_sentence = re.sub(PATTERN, ' ', sentence) return filtered_sentence remove_before_token(text1) def remove_after_token(tokens): pattern = re.compile('[{}]'.format(re.escape(string.punctuation))) filtered_tokens = filter(None, [pattern.sub('', token) for token in tokens]) filtered_text = ' '.join(filtered_tokens) return filtered_text remove_special_characters(tokens) """ Expanding contraction """ CONTRACTION_MAP = {"ain't": 'is not', "aren't": 'are not', "can't": 'cannot', "can't've": 'cannot have', "'cause": 'because', "could've": 'could have', "couldn't": 'could not', "couldn't've": 'could not have', "didn't": 'did not', "doesn't": 'does not', "don't": 'do not', "hadn't": 'had not', "hadn't've": 'had not have', "hasn't": 'has not', "haven't": 'have not', "he'd": 'he would', "he'd've": 'he would have', "he'll": 'he will', "he'll've": 'he he will have', "he's": 'he is', "how'd": 'how did', "how'd'y": 'how do you', "how'll": 'how will', "how's": 'how is', "I'd": 'I would', "I'd've": 'I would have', "I'll": 'I will', "I'll've": 'I will have', "I'm": 'I am', "I've": 'I have', "i'd": 'i would', "i'd've": 'i would have', "i'll": 'i will', "i'll've": 'i will have', "i'm": 'i am', "i've": 'i have', "isn't": 'is not', "it'd": 'it would', "it'd've": 'it would have', "it'll": 'it will', "it'll've": 'it will have', "it's": 'it is', "let's": 'let us', "ma'am": 'madam', "mayn't": 'may 
not', "might've": 'might have', "mightn't": 'might not', "mightn't've": 'might not have', "must've": 'must have', "mustn't": 'must not', "mustn't've": 'must not have', "needn't": 'need not', "needn't've": 'need not have', "o'clock": 'of the clock', "oughtn't": 'ought not', "oughtn't've": 'ought not have', "shan't": 'shall not', "sha'n't": 'shall not', "shan't've": 'shall not have', "she'd": 'she would', "she'd've": 'she would have', "she'll": 'she will', "she'll've": 'she will have', "she's": 'she is', "should've": 'should have', "shouldn't": 'should not', "shouldn't've": 'should not have', "so've": 'so have', "so's": 'so as', "this's": 'this is', "that'd": 'that would', "that'd've": 'that would have', "that's": 'that is', "there'd": 'there would', "there'd've": 'there would have', "there's": 'there is', "they'd": 'they would', "they'd've": 'they would have', "they'll": 'they will', "they'll've": 'they will have', "they're": 'they are', "they've": 'they have', "to've": 'to have', "wasn't": 'was not', "we'd": 'we would', "we'd've": 'we would have', "we'll": 'we will', "we'll've": 'we will have', "we're": 'we are', "we've": 'we have', "weren't": 'were not', "what'll": 'what will', "what'll've": 'what will have', "what're": 'what are', "what's": 'what is', "what've": 'what have', "when's": 'when is', "when've": 'when have', "where'd": 'where did', "where's": 'where is', "where've": 'where have', "who'll": 'who will', "who'll've": 'who will have', "who's": 'who is', "who've": 'who have', "why's": 'why is', "why've": 'why have', "will've": 'will have', "won't": 'will not', "won't've": 'will not have', "would've": 'would have', "wouldn't": 'would not', "wouldn't've": 'would not have', "y'all": 'you all', "y'all'd": 'you all would', "y'all'd've": 'you all would have', "y'all're": 'you all are', "y'all've": 'you all have', "you'd": 'you would', "you'd've": 'you would have', "you'll": 'you will', "you'll've": 'you will have', "you're": 'you are', "you've": 'you have'} def expand_contractions(sentence, contraction_mapping): contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())), flags=re.IGNORECASE | re.DOTALL) def expand_match(contraction): match = contraction.group(0) first_char = match[0] expanded_contraction = contraction_mapping.get(match) if contraction_mapping.get(match) else contraction_mapping.get(match.lower()) expanded_contraction = first_char + expanded_contraction[1:] return expanded_contraction expanded_sentence = contractions_pattern.sub(expand_match, sentence) return expanded_sentence expanded_corpus = [expand_contractions(txt, CONTRACTION_MAP) for txt in sent_tokenize(text1)] print('Text before expanding contraction : \n ', text1) print('\n Text after expanding contraction : \n ', expanded_corpus)
code
2024900/cell_8
[ "text_plain_output_1.png" ]
from nltk.tokenize import sent_tokenize
import nltk
text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn."
"""
Sentence tokenize in NLTK with sent_tokenize
The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer.
This instance of NLTK has already been trained to perform tokenization on different European languages
on the basis of letters or punctuation that mark the beginning and end of sentences.
"""
from nltk.tokenize import sent_tokenize
'\nOther approaches\n'
punkt_st = nltk.tokenize.PunktSentenceTokenizer()
sample_sentences = punkt_st.tokenize(text1)
"""
Another way to proceed
"""
import nltk.data
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
print('English token ', tokenizer.tokenize(text1))
french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle')
print('\nFrench token ', french_tokenizer.tokenize("Il fait beau aujourd'hui. Vas-tu sortir ? N'y a-t-il pas du pain ?"))
code
2024900/cell_16
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) print(tokens)
code
2024900/cell_3
[ "text_plain_output_1.png" ]
text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." print(text1)
code
2024900/cell_24
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk import re import string text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) """ Remove blancs """ def remove_blanc(tokens): tokens = [token.strip() for token in tokens] return tokens tokens = remove_blanc(tokens) def remove_before_token(sentence, keep_apostrophe=False): sentence = sentence.strip() if keep_apostrophe: PATTERN = '[?|$|&|*|%|@|(|)|~]' filtered_sentence = re.sub(PATTERN, ' ', sentence) else: PATTERN = '[^a-zA-Z0-9]' filtered_sentence = re.sub(PATTERN, ' ', sentence) return filtered_sentence remove_before_token(text1) def remove_after_token(tokens): pattern = re.compile('[{}]'.format(re.escape(string.punctuation))) filtered_tokens = filter(None, [pattern.sub('', token) for token in tokens]) filtered_text = ' '.join(filtered_tokens) return filtered_text remove_special_characters(tokens)
code
2024900/cell_22
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk import re text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def remove_before_token(sentence, keep_apostrophe=False): sentence = sentence.strip() if keep_apostrophe: PATTERN = '[?|$|&|*|%|@|(|)|~]' filtered_sentence = re.sub(PATTERN, ' ', sentence) else: PATTERN = '[^a-zA-Z0-9]' filtered_sentence = re.sub(PATTERN, ' ', sentence) return filtered_sentence remove_before_token(text1)
code
2024900/cell_37
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Starting point : tokens """ tokens = tokenize_word_text(text1) """ Converting all letters to lower or upper case (common : lower case) """ def convert_letters(tokens, style='lower'): if style == 'lower': tokens = [token.lower() for token in tokens] else: tokens = [token.upper() for token in tokens] return tokens tokens = convert_letters(tokens) """ Remove blancs """ def remove_blanc(tokens): tokens = [token.strip() for token in tokens] return tokens tokens = remove_blanc(tokens) """ Removing accent marks and other diacritics """ def remove_accent(tokens): tokens = [unidecode.unidecode(token) for token in tokens] return tokens tokens = remove_accent(tokens) """ Use a stopwords list """ stopword_list = nltk.corpus.stopwords.words('english') ' \nCreate your own stopwords list\n' stopwords = ['a', 'about', 'above', 'across', 'after', 'afterwards'] stopwords += ['again', 'against', 'all', 'almost', 'alone', 'along'] stopwords += ['this', 'is', 'your'] def removeStopwords(wordlist, stopwords): return [w for w in wordlist if w not in stopwords] tokens = nltk.word_tokenize(text1) removeStopwords(tokens, stopwords)
code
2024900/cell_12
[ "text_plain_output_1.png" ]
""" from nltk.tokenize import PunktSentenceTokenizer from nltk.corpus import state_union train_text = state_union.raw("2005-GWBush.txt") sample_text = state_union.raw("2006-GWBush.txt") # train the Punkt tokenizer like: custom_sent_tokenizer = PunktSentenceTokenizer(train_text) # we can actually tokenize tokenized = custom_sent_tokenizer.tokenize(sample_text) tokenized """
code
2024900/cell_5
[ "text_plain_output_1.png" ]
from nltk.tokenize import sent_tokenize
import nltk
text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn."
"""
Sentence tokenize in NLTK with sent_tokenize
The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer.
This instance of NLTK has already been trained to perform tokenization on different European languages
on the basis of letters or punctuation that mark the beginning and end of sentences.
"""
from nltk.tokenize import sent_tokenize
print('Sentence tokenize in NLTK With sent_tokenize \n', sent_tokenize(text1))
'\nOther approaches\n'
punkt_st = nltk.tokenize.PunktSentenceTokenizer()
sample_sentences = punkt_st.tokenize(text1)
print('\nSentence tokenize with PunktSentenceTokenizer \n ', print(sample_sentences))
code
2024900/cell_36
[ "text_plain_output_1.png" ]
from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import sent_tokenize import nltk import nltk import nltk text1 = "ThIs's ã sent tokenize test . this's sent two. is this sent three? sent 4 is cool! Now it's your turn." """ Sentence tokenize in NLTK with sent_tokenize The sent_tokenize function uses an instance of NLTK known as PunktSentenceTokenizer This instance of NLTK has already been trained to perform tokenization on different European languages on the basis of letters or punctuation that mark the beginning and end of sentences """ from nltk.tokenize import sent_tokenize '\nAutres manières \n' punkt_st = nltk.tokenize.PunktSentenceTokenizer() sample_sentences = punkt_st.tokenize(text1) """ Autre manière de procéder """ import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') french_tokenizer = nltk.data.load('tokenizers/punkt/french.pickle') """ Naive Split """ '\nTokenizing text into words\n' import nltk tokens = nltk.word_tokenize(text1) '\nEquivalent method with TreebankWordTokenizer\n' from nltk.tokenize import TreebankWordTokenizer tokenizer = TreebankWordTokenizer() '\nEquivalent method with WordPunctTokenizer \n' from nltk.tokenize import WordPunctTokenizer tokenizer = WordPunctTokenizer() def tokenize_word_text(text): tokens = nltk.word_tokenize(text) tokens = [token.strip() for token in tokens] return tokens """ Use a stopwords list """ stopword_list = nltk.corpus.stopwords.words('english') print('StopWords List in English : \n', stopword_list) ' \nCreate your own stopwords list\n' stopwords = ['a', 'about', 'above', 'across', 'after', 'afterwards'] stopwords += ['again', 'against', 'all', 'almost', 'alone', 'along'] stopwords += ['this', 'is', 'your']
code
49129174/cell_13
[ "text_plain_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
print('p-value is: ', pval)
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
print('chi=%.6f, critical value=%.6f\n' % (chi, critical_value))
if chi > critical_value:
    print('At %.2f level of significance, we reject the null hypotheses and accept H1. \nThey are not independent.' % significance)
else:
    print('At %.2f level of significance, we accept the null hypotheses. \nThey are independent.' % significance)
code
49129174/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
print('Correlation of Women Entrepreneurship Index with Entrepreneurship Index:', df.corr().iloc[4, 5])
code
49129174/cell_4
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df.info()
code
49129174/cell_30
[ "text_plain_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
plt.figure(figsize=[10, 15])
sns.barplot(y='Country', x='Female Labor Force Participation Rate', data=df, hue='European Union Membership')
plt.show()
code
49129174/cell_20
[ "text_plain_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
sns.relplot(data=df, x='Women Entrepreneurship Index', y='Entrepreneurship Index', hue='European Union Membership', col='Level of development')
code
49129174/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
df.head()
code
49129174/cell_29
[ "text_plain_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
plt.figure(figsize=[10, 15])
sns.barplot(y='Country', x='Inflation rate', data=df, hue='European Union Membership')
plt.show()
code
49129174/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
plt.figure()
sns.scatterplot(data=df, x='Women Entrepreneurship Index', y='Entrepreneurship Index')
sns.regplot(data=df, x='Women Entrepreneurship Index', y='Entrepreneurship Index')
plt.show()
code
49129174/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
print('Correlation of Level of development with European Union Member:', df.corr().iloc[1, 2])
code
49129174/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49129174/cell_18
[ "text_html_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
sns.relplot(data=df, x='Women Entrepreneurship Index', y='Entrepreneurship Index', hue='European Union Membership')
code
49129174/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
df.head()
code
49129174/cell_15
[ "text_html_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
sns.heatmap(pd.crosstab(df['Level of development'], df['European Union Membership']), annot=True)
code
49129174/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df.head()
code
49129174/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
from scipy.stats import mannwhitneyu
from scipy.stats import spearmanr
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
from scipy.stats import mannwhitneyu
p = mannwhitneyu(df['Women Entrepreneurship Index'], df['Entrepreneurship Index'])
alpha = 0.05
from scipy.stats import spearmanr
coef, p = spearmanr(df['Women Entrepreneurship Index'], df['Entrepreneurship Index'])
print('Spearmans correlation coefficient: %.3f' % coef)
alpha = 0.05
if p > alpha:
    print('Samples are uncorrelated (fail to reject H0) p =', p)
else:
    print('Samples are correlated (reject H0) p =', p)
code
49129174/cell_22
[ "text_plain_output_1.png" ]
from scipy.stats import chi2
from scipy.stats import chi2_contingency
from scipy.stats import mannwhitneyu
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/women-entrepreneurship-and-labor-force/Dataset3.csv', header=0, sep=';', names=['No', 'Country', 'Level of development', 'European Union Membership', 'Currency', 'Women Entrepreneurship Index', 'Entrepreneurship Index', 'Inflation rate', 'Female Labor Force Participation Rate'])
df['Level of development'] = pd.get_dummies(df['Level of development'], drop_first=True)
df['European Union Membership'] = pd.get_dummies(df['European Union Membership'], drop_first=True)
df['Currency'] = pd.get_dummies(df['Currency'], drop_first=True)
for i in range(len(df)):
    if df.iloc[i, 2] == 0:
        df.iloc[i, 2] = 1
    elif df.iloc[i, 2] == 1:
        df.iloc[i, 2] = 0
    if df.iloc[i, 3] == 0:
        df.iloc[i, 3] = 1
    elif df.iloc[i, 3] == 1:
        df.iloc[i, 3] = 0
    if df.iloc[i, 4] == 0:
        df.iloc[i, 4] = 1
    elif df.iloc[i, 4] == 1:
        df.iloc[i, 4] = 0
from scipy.stats import chi2_contingency
from scipy.stats import chi2
chi, pval, dof, exp = chi2_contingency(pd.crosstab(df['Level of development'], df['European Union Membership']))
significance = 0.05
p = 1 - significance
critical_value = chi2.ppf(p, dof)
from scipy.stats import mannwhitneyu
p = mannwhitneyu(df['Women Entrepreneurship Index'], df['Entrepreneurship Index'])
alpha = 0.05
if p[1] > 1.96 or p[1] < -1.96:
    print('There is no diffrenece between the ranks of the two columns p =', p[1])
else:
    print('There is diffrenece between the ranks of the two columns p =', p[1])
code
89127815/cell_9
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
code
89127815/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.describe()
code
89127815/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89127815/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
code
89127815/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/train.zip')
test = pd.read_csv('/kaggle/input/nyc-taxi-trip-duration/test.zip')
train.head()
code
128031511/cell_4
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
data_train_db.head()
code
128031511/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
from scipy import stats
stdev_max = 6
pd.set_option('display.max_rows', data_train_db.shape[0] + 1)
data_train = data_train_db.values
labels = ('Runs', 'Does not run')
sizes = [np.sum(data_train[:, 0]), np.sum(1 - data_train[:, 0])]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
code
128031511/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128031511/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
from scipy import stats
stdev_max = 6
pd.set_option('display.max_rows', data_train_db.shape[0] + 1)
data_train = data_train_db.values
labels = 'Runs', 'Does not run'
sizes = [np.sum(data_train[:,0]), np.sum(1-data_train[:,0])]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
corrMatrix = data_train_db.corr().abs()
plt.figure(figsize=(12, 12))
sn.heatmap(corrMatrix, annot=False)
plt.show()
plt.figure(figsize=(12, 6))
plt.plot(np.arange(1, 100), corrMatrix['Running'][1:100])
plt.title('Correlation with Running')
plt.show()
code
128031511/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
from scipy import stats
stdev_max = 6
pd.set_option('display.max_rows', data_train_db.shape[0] + 1)
data_train = data_train_db.values
labels = 'Runs', 'Does not run'
sizes = [np.sum(data_train[:,0]), np.sum(1-data_train[:,0])]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()

def myclassifier(data):
    if data[1] == 0:
        return 0
    if data[2] > 1.5 and (data[65] + data[66] < 5 or data[3] + data[4] + data[5] < 4.5 or data[80] + data[81] + data[82] < 4.5) or (data[2] < 1.5 and (data[99] > 3.5 or data[62] + data[63] + data[64] < 5 or data[77] + data[78] + data[79] < 4.5)):
        return 0
    return 1
err1 = 0
for t in range(data_train.shape[0]):
    err1 = err1 + (myclassifier1(data_train[t, :]) != data_train[t, 0])
err2 = 0
for t in range(data_train.shape[0]):
    err2 = err2 + (myclassifier(data_train[t, :]) != data_train[t, 0])
print('Train accuracy 1:' + str(1 - err1 / data_train.shape[0]))
print('Train accuracy 2:' + str(1 - err2 / data_train.shape[0]))
print('difference: ' + str(1 - err1 / data_train.shape[0] - 1 + err2 / data_train.shape[0]))
code
128031511/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
from scipy import stats
stdev_max = 6
pd.set_option('display.max_rows', data_train_db.shape[0] + 1)
data_train = data_train_db.values
labels = 'Runs', 'Does not run'
sizes = [np.sum(data_train[:,0]), np.sum(1-data_train[:,0])]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
corrMatrix = data_train_db.corr().abs()
data_train_db.columns[79]
print(data_train_db.columns[77])
print(data_train_db.columns[11])
print(data_train_db.columns[24])
code
128031511/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
from scipy import stats
stdev_max = 6
pd.set_option('display.max_rows', data_train_db.shape[0] + 1)
data_train = data_train_db.values
labels = 'Runs', 'Does not run'
sizes = [np.sum(data_train[:,0]), np.sum(1-data_train[:,0])]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
corrMatrix = data_train_db.corr().abs()
data_train_db.columns[79]
for i in range(2, 99):
    data_train_db[data_train_db.columns[i] + ' +'] = data_train_db[data_train_db.columns[i]] + data_train_db[data_train_db.columns[i + 1]]
for i in range(2, 98):
    data_train_db[data_train_db.columns[i] + ' ++'] = data_train_db[data_train_db.columns[i]] + data_train_db[data_train_db.columns[i + 1]] + data_train_db[data_train_db.columns[i + 2]]
corrMatrix2 = data_train_db.corr().abs()
S = np.argsort(np.array(corrMatrix['Running']))[::-1]
S = S[1:]
S2 = np.argsort(np.array(corrMatrix2['Running']))[::-1]
S2 = S2[1:]
print(S)
print(S2)
code
128031511/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train_db = pd.read_csv('/kaggle/input/data-train-db/data_train_db.csv')
data_test_db = pd.read_csv('/kaggle/input/data-test-db/data_test_db.csv')
from scipy import stats
stdev_max = 6
pd.set_option('display.max_rows', data_train_db.shape[0] + 1)
print(data_train_db.columns.get_loc('Controller blanchedalmond'))
print(data_train_db.columns.get_loc('Controller darkgray'))
print(data_train_db.columns.get_loc('Bending of test plate'))
print(data_train_db.columns.get_loc('Second Counterweight'))
print(data_train_db.columns.get_loc('Controller gainsboro'))
code
16147672/cell_9
[ "text_plain_output_1.png" ]
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score,f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from xgboost import XGBClassifier
classes = [SVC(), RandomForestClassifier(), AdaBoostClassifier(), BaggingClassifier(), GradientBoostingClassifier(), GaussianNB(), XGBClassifier(), ExtraTreesClassifier()]
params = [[{'kernel': ['rbf'], 'gamma': [0.001, 0.0001], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}], [{'n_estimators': [100, 200, 400], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [4, 5, 6, 7, 8], 'criterion': ['gini', 'entropy']}], [{'n_estimators': [100, 200, 300, 400, 500], 'learning_rate': [0.01, 0.1, 1]}], [{'n_estimators': [100, 200, 300, 400, 500], 'max_samples': [0.5, 0.75, 1.0]}], [{'loss': ['deviance'], 'n_estimators': [50, 100, 300, 400, 500], 'max_depth': [3, 5, 8]}], [{'priors': [None]}], [{'learning_rate': [0.03, 0.05], 'max_depth': [1, 2, 4, 6, 8, 10], 'n_estimators': [50, 100, 300, 500]}], [{'n_estimators': [100, 200, 400, 500], 'criterion': ['gini', 'entropy'], 'max_depth': [1, 2, 4, 6, 8, 10]}]]
'params1=[[ {\'n_estimators\': [100,200,400],\n\'learning_rate\':[0.01,0.1,1]}],[{\'n_estimators\': [100,200,400],\n\'max_samples\':[.5, .75, 1.0]}] ,[{"loss":["deviance"],"n_estimators":[50,100,300],\n"max_depth":[3,5,8]}] ,[{"priors":[None]}],[{\'learning_rate\':[.03, .05],\'max_depth\': [1,2,4,6,8,10],\'n_estimators\':[10, 50, 100, 300]} ],\n [{\'n_estimators\':[100,200,400],\'criterion\':[\'gini\', \'entropy\'],\'max_depth\':[1,2,4,6,8,10]}]]\n'
testlist = []
paramslist = []
bestscore = []
bestestimator = []
for c, p in zip(classes, params):
    print('the model using is {}'.format(c))
    print('\n')
    print('\n')
    grid_search = GridSearchCV(estimator=c, param_grid=p, cv=5, n_jobs=-1)
    grid_search.fit(train_x, train_y.values.ravel())
    best_param = grid_search.best_params_
    print(best_param)
    paramslist.append(best_param)
    best_score = grid_search.best_score_
    bestscore.append(best_score)
    best_estimator = grid_search.best_estimator_
    bestestimator.append(best_estimator)
    ypred = best_estimator.predict(test_x)
    testlist.append(tuple((accuracy_score(test_y, ypred), f1_score(test_y, ypred))))
code
16147672/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16147672/cell_7
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
data1 = data_train.copy(deep=True)
data2 = data_test.copy(deep=True)
frame = [data1, data2]
lis = []
lis1 = []
for col in data1:
    if data1[col].isnull().any():
        lis.append(col)
for col in data2:
    if data2[col].isnull().any():
        lis1.append(col)
lis.remove('Cabin')
lis1.remove('Cabin')
data1['Age'].fillna(data1['Age'].median(), inplace=True)
data1['Embarked'].fillna(data1['Embarked'].mode()[0], inplace=True)
data2['Age'].fillna(data2['Age'].median(), inplace=True)
data1['Fare'].fillna(data1['Fare'].median(), inplace=True)
data2['Fare'].fillna(data2['Fare'].median(), inplace=True)
drop_val = ['PassengerId', 'Cabin', 'Ticket']
data1.drop(drop_val, axis=1, inplace=True)
for dataset in frame:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1
    dataset['IsAlone'].loc[dataset['FamilySize'] > 1] = 0
label = preprocessing.LabelEncoder()
for d in frame:
    d['Sex'] = label.fit_transform(d['Sex'])
    d['Embarked'] = label.fit_transform(d['Embarked'])
tar = ['Survived']
x_label = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'FamilySize', 'IsAlone']
data1.drop(['Name'], axis=1, inplace=True)
data1.isnull().sum()
code
16147672/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
type(data_test)
code
16147672/cell_10
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
data1 = data_train.copy(deep=True)
data2 = data_test.copy(deep=True)
frame = [data1, data2]
lis = []
lis1 = []
for col in data1:
    if data1[col].isnull().any():
        lis.append(col)
for col in data2:
    if data2[col].isnull().any():
        lis1.append(col)
lis.remove('Cabin')
lis1.remove('Cabin')
data1['Age'].fillna(data1['Age'].median(), inplace=True)
data1['Embarked'].fillna(data1['Embarked'].mode()[0], inplace=True)
data2['Age'].fillna(data2['Age'].median(), inplace=True)
data1['Fare'].fillna(data1['Fare'].median(), inplace=True)
data2['Fare'].fillna(data2['Fare'].median(), inplace=True)
drop_val = ['PassengerId', 'Cabin', 'Ticket']
data1.drop(drop_val, axis=1, inplace=True)
for dataset in frame:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1
    dataset['IsAlone'].loc[dataset['FamilySize'] > 1] = 0
label = preprocessing.LabelEncoder()
for d in frame:
    d['Sex'] = label.fit_transform(d['Sex'])
    d['Embarked'] = label.fit_transform(d['Embarked'])
tar = ['Survived']
x_label = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'FamilySize', 'IsAlone']
data1.drop(['Name'], axis=1, inplace=True)
data1.isnull().sum()
data1
code
16147672/cell_12
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score,f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from xgboost import XGBClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
data1 = data_train.copy(deep=True)
data2 = data_test.copy(deep=True)
frame = [data1, data2]
lis = []
lis1 = []
for col in data1:
    if data1[col].isnull().any():
        lis.append(col)
for col in data2:
    if data2[col].isnull().any():
        lis1.append(col)
lis.remove('Cabin')
lis1.remove('Cabin')
data1['Age'].fillna(data1['Age'].median(), inplace=True)
data1['Embarked'].fillna(data1['Embarked'].mode()[0], inplace=True)
data2['Age'].fillna(data2['Age'].median(), inplace=True)
data1['Fare'].fillna(data1['Fare'].median(), inplace=True)
data2['Fare'].fillna(data2['Fare'].median(), inplace=True)
drop_val = ['PassengerId', 'Cabin', 'Ticket']
data1.drop(drop_val, axis=1, inplace=True)
for dataset in frame:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1
    dataset['IsAlone'].loc[dataset['FamilySize'] > 1] = 0
label = preprocessing.LabelEncoder()
for d in frame:
    d['Sex'] = label.fit_transform(d['Sex'])
    d['Embarked'] = label.fit_transform(d['Embarked'])
tar = ['Survived']
x_label = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'FamilySize', 'IsAlone']
data1.drop(['Name'], axis=1, inplace=True)
classes = [SVC(), RandomForestClassifier(), AdaBoostClassifier(), BaggingClassifier(), GradientBoostingClassifier(), GaussianNB(), XGBClassifier(), ExtraTreesClassifier()]
params = [[{'kernel': ['rbf'], 'gamma': [0.001, 0.0001], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}], [{'n_estimators': [100, 200, 400], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [4, 5, 6, 7, 8], 'criterion': ['gini', 'entropy']}], [{'n_estimators': [100, 200, 300, 400, 500], 'learning_rate': [0.01, 0.1, 1]}], [{'n_estimators': [100, 200, 300, 400, 500], 'max_samples': [0.5, 0.75, 1.0]}], [{'loss': ['deviance'], 'n_estimators': [50, 100, 300, 400, 500], 'max_depth': [3, 5, 8]}], [{'priors': [None]}], [{'learning_rate': [0.03, 0.05], 'max_depth': [1, 2, 4, 6, 8, 10], 'n_estimators': [50, 100, 300, 500]}], [{'n_estimators': [100, 200, 400, 500], 'criterion': ['gini', 'entropy'], 'max_depth': [1, 2, 4, 6, 8, 10]}]]
'params1=[[ {\'n_estimators\': [100,200,400],\n\'learning_rate\':[0.01,0.1,1]}],[{\'n_estimators\': [100,200,400],\n\'max_samples\':[.5, .75, 1.0]}] ,[{"loss":["deviance"],"n_estimators":[50,100,300],\n"max_depth":[3,5,8]}] ,[{"priors":[None]}],[{\'learning_rate\':[.03, .05],\'max_depth\': [1,2,4,6,8,10],\'n_estimators\':[10, 50, 100, 300]} ],\n [{\'n_estimators\':[100,200,400],\'criterion\':[\'gini\', \'entropy\'],\'max_depth\':[1,2,4,6,8,10]}]]\n'
testlist = []
paramslist = []
bestscore = []
bestestimator = []
for c, p in zip(classes, params):
    grid_search = GridSearchCV(estimator=c, param_grid=p, cv=5, n_jobs=-1)
    grid_search.fit(train_x, train_y.values.ravel())
    best_param = grid_search.best_params_
    paramslist.append(best_param)
    best_score = grid_search.best_score_
    bestscore.append(best_score)
    best_estimator = grid_search.best_estimator_
    bestestimator.append(best_estimator)
    ypred = best_estimator.predict(test_x)
    testlist.append(tuple((accuracy_score(test_y, ypred), f1_score(test_y, ypred))))
data3 = data2.copy(deep=True)
data3.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data3.drop(['PassengerId'], axis=1, inplace=True)
yy = bestestimator[6].predict(data3)
submission = pd.DataFrame({'PassengerId': data2['PassengerId'], 'Survived': yy})
submission.head()
filename = 'Titanic Predictions 2.csv'
submission.to_csv(filename, index=False)
print('Saved file: ' + filename)
code
16147672/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
data1 = data_train.copy(deep=True)
data2 = data_test.copy(deep=True)
frame = [data1, data2]
lis = []
lis1 = []
print(data1.info())
print(data2.info())
for col in data1:
    if data1[col].isnull().any():
        lis.append(col)
for col in data2:
    if data2[col].isnull().any():
        lis1.append(col)
lis.remove('Cabin')
lis1.remove('Cabin')
data1['Age'].fillna(data1['Age'].median(), inplace=True)
data1['Embarked'].fillna(data1['Embarked'].mode()[0], inplace=True)
data2['Age'].fillna(data2['Age'].median(), inplace=True)
data1['Fare'].fillna(data1['Fare'].median(), inplace=True)
data2['Fare'].fillna(data2['Fare'].median(), inplace=True)
drop_val = ['PassengerId', 'Cabin', 'Ticket']
data1.drop(drop_val, axis=1, inplace=True)
for dataset in frame:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1
    dataset['IsAlone'].loc[dataset['FamilySize'] > 1] = 0
label = preprocessing.LabelEncoder()
for d in frame:
    d['Sex'] = label.fit_transform(d['Sex'])
    d['Embarked'] = label.fit_transform(d['Embarked'])
tar = ['Survived']
x_label = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'FamilySize', 'IsAlone']
data1.drop(['Name'], axis=1, inplace=True)
code
1005486/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
df.describe()
code
1005486/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/epi_r.csv')
df.head()
code
1005486/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
code
1005486/cell_5
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/epi_r.csv')
sorted(list(df.columns))
df.pivot_table(index=['rating'], columns=['fat'], aggfunc=np.mean)
code
89131571/cell_13
[ "text_html_output_1.png" ]
import csv
import pandas as pd
import sqlite3
path = './'
database = path + 'ted-talk-data.sqlite'
conn = sqlite3.connect(database)
create_table = 'CREATE TABLE tedtalk(\n title TEXT,\n author TEXT,\n date DATE,\n views INTEGER,\n likes INTEGER,\n link TEXT);\n '
cursor = conn.cursor()
cursor.execute(create_table)
file = open('../input/ted-talks/data.csv')
content = list(csv.reader(file))
content = content[1:]
file.close()
sql_test = pd.read_sql('SELECT * FROM tedtalk LIMIT 5', conn)
sql_test
code
17141744/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
learn = language_model_learner(data, AWD_LSTM, drop_mult=0.3)
learn.lr_find()
learn.fit_one_cycle(10, 0.01)
code
17141744/cell_9
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
data.show_batch()
code
17141744/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
train.head()
code
17141744/cell_20
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
test_datalist = TextList.from_df(test, cols='Phrase', vocab=data.vocab)
data_clas = TextList.from_df(train, cols='Phrase', vocab=data.vocab).split_by_rand_pct(0.2).label_from_df(cols='Sentiment', classes=['1', '2', '3', '4', '0'], label_cls=CategoryList).add_test(test_datalist).databunch(bs=32)
learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn_classifier.load_encoder('fine_tuned_enc')
learn_classifier.freeze()
learn_classifier.lr_find()
learn_classifier.fit_one_cycle(10, 0.01)
learn_classifier.show_results()
code
17141744/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
learn = language_model_learner(data, AWD_LSTM, drop_mult=0.3)
learn.lr_find()
code
17141744/cell_19
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
test_datalist = TextList.from_df(test, cols='Phrase', vocab=data.vocab)
data_clas = TextList.from_df(train, cols='Phrase', vocab=data.vocab).split_by_rand_pct(0.2).label_from_df(cols='Sentiment', classes=['1', '2', '3', '4', '0'], label_cls=CategoryList).add_test(test_datalist).databunch(bs=32)
learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn_classifier.load_encoder('fine_tuned_enc')
learn_classifier.freeze()
learn_classifier.lr_find()
learn_classifier.fit_one_cycle(10, 0.01)
code
17141744/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
17141744/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
test['Phrase'][0]
code
17141744/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
test_datalist = TextList.from_df(test, cols='Phrase', vocab=data.vocab)
data_clas = TextList.from_df(train, cols='Phrase', vocab=data.vocab).split_by_rand_pct(0.2).label_from_df(cols='Sentiment', classes=['1', '2', '3', '4', '0'], label_cls=CategoryList).add_test(test_datalist).databunch(bs=32)
learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn_classifier.load_encoder('fine_tuned_enc')
learn_classifier.freeze()
learn_classifier.lr_find()
learn_classifier.recorder.plot()
code