Dataset schema (one record per notebook cell):
  path              string        (length 13 to 17)
  screenshot_names  sequence      (1 to 873 entries)
  code              string        (length 0 to 40.4k)
  cell_type         string class  (1 value)
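Each record below pairs a notebook cell path with its output screenshot names, the cell's source code, and its cell type. A minimal sketch of how such a dump might be loaded for inspection, assuming the records are available as a single Parquet file (the file name and format are assumptions, not stated anywhere in this dump):

import pandas as pd

# File name and format are assumptions; point this at wherever the dump actually lives.
df_cells = pd.read_parquet('notebook_cells.parquet')

# One row per notebook cell: path, screenshot_names, code, cell_type.
print(df_cells.columns.tolist())
print(df_cells.loc[0, 'path'])                     # e.g. '130004668/cell_29'
print(len(df_cells.loc[0, 'screenshot_names']))    # between 1 and 873 screenshots per cell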
130004668/cell_29
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier

param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}]
grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
code
130004668/cell_26
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import StandardScaler, OrdinalEncoder from tqdm import tqdm import pandas as pd df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1) df_reviews_raw.isna().sum() df_reviews_raw.dtypes df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True) df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)] df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True) df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True) df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1)) df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1)) df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1)) df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1)) df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1)) df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/'))) tqdm.pandas() stop = stopwords.words() df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower() df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower() df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) tfidf = TfidfVectorizer(min_df=5, max_df=0.95, max_features=8000, stop_words='english') tfidf.fit(df_reviews.url) url_tfidf = tfidf.transform(df_reviews.url) tfidf.fit(df_reviews.content) content_tfidf = tfidf.transform(df_reviews.content) label_true = df_reviews[df_reviews['label'] == 1.0] label_false = df_reviews[df_reviews['label'] == 0.0] print(label_true.shape, label_false.shape, df_reviews.shape)
code
130004668/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews.label.describe()
code
130004668/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import warnings
import nltk
import string
import re
import sklearn
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from sklearn.cluster import MiniBatchKMeans
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from scipy import stats
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Conv1D, LSTM, Dense, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

warnings.simplefilter(action='ignore', category=FutureWarning)
code
130004668/cell_18
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from sklearn.preprocessing import StandardScaler, OrdinalEncoder from tqdm import tqdm import pandas as pd df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1) df_reviews_raw.isna().sum() df_reviews_raw.dtypes df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True) df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)] df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True) df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True) df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1)) df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1)) df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1)) df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1)) df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1)) df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/'))) print('Before Preprocessing:') print(df_reviews.content.head()) tqdm.pandas() stop = stopwords.words() df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower() df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower() df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) print('After Preprocessing:') print(df_reviews.content.head())
code
130004668/cell_32
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier

param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}]
grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
grid.best_params_
grid.score(X_train, y_train)
grid.score(X_test, y_test)
code
130004668/cell_16
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1))
df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1))
df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1))
df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1))
df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1))
df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/')))
df_reviews.head()
code
130004668/cell_35
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.preprocessing import StandardScaler, OrdinalEncoder from sklearn.tree import DecisionTreeClassifier from tqdm import tqdm import pandas as pd df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1) df_reviews_raw.isna().sum() df_reviews_raw.dtypes df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True) df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)] df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True) df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True) df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1)) df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1)) df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1)) df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1)) df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1)) df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/'))) tqdm.pandas() stop = stopwords.words() df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower() df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower() df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}] grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5) grid.fit(X_train, y_train) grid.best_params_ grid.score(X_train, y_train) grid.score(X_test, y_test) param_grid = [{'n_estimators': [x for x in range(10, 120, 10)], 'criterion': ['gini', 'entropy']}] grid = GridSearchCV(estimator=RandomForestClassifier(random_state=42), param_grid=param_grid, cv=5) grid.fit(X_train, y_train) grid.best_params_
code
130004668/cell_31
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier

param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}]
grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
grid.best_params_
grid.score(X_train, y_train)
code
130004668/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_trimmed.label.describe()
code
130004668/cell_37
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.preprocessing import StandardScaler, OrdinalEncoder from sklearn.tree import DecisionTreeClassifier from tqdm import tqdm import pandas as pd df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1) df_reviews_raw.isna().sum() df_reviews_raw.dtypes df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True) df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)] df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True) df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True) df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1)) df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1)) df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1)) df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1)) df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1)) df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/'))) tqdm.pandas() stop = stopwords.words() df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower() df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower() df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}] grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5) grid.fit(X_train, y_train) grid.best_params_ grid.score(X_train, y_train) grid.score(X_test, y_test) param_grid = [{'n_estimators': [x for x in range(10, 120, 10)], 'criterion': ['gini', 'entropy']}] grid = GridSearchCV(estimator=RandomForestClassifier(random_state=42), param_grid=param_grid, cv=5) grid.fit(X_train, y_train) grid.best_params_ grid.score(X_train, y_train) grid.score(X_test, y_test)
code
130004668/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews.head()
code
130004668/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
code
130004668/cell_36
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.preprocessing import StandardScaler, OrdinalEncoder from sklearn.tree import DecisionTreeClassifier from tqdm import tqdm import pandas as pd df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1) df_reviews_raw.isna().sum() df_reviews_raw.dtypes df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True) df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)] df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True) df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True) df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1)) df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1)) df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1)) df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1)) df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1)) df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/'))) tqdm.pandas() stop = stopwords.words() df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower() df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower() df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop])) param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}] grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5) grid.fit(X_train, y_train) grid.best_params_ grid.score(X_train, y_train) grid.score(X_test, y_test) param_grid = [{'n_estimators': [x for x in range(10, 120, 10)], 'criterion': ['gini', 'entropy']}] grid = GridSearchCV(estimator=RandomForestClassifier(random_state=42), param_grid=param_grid, cv=5) grid.fit(X_train, y_train) grid.best_params_ grid.score(X_train, y_train)
code
105200358/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
df.describe()
code
105200358/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
print(data)
code
105200358/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105200358/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
df.head()
code
105200358/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/penguins/penguins.csv')
df.info()
code
106196903/cell_21
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') text_vizual('tech', removed_punch_x_train)
code
106196903/cell_13
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
y_train = x_train['category']
x_train = x_train['text']
y_train
code
106196903/cell_9
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
plt.yticks(np.arange(0, 550, 50))
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
plt.yticks(np.arange(0, 550, 50))
sns.countplot(x_test['category'])
plt.grid(True, axis='y')
plt.yticks(np.arange(0, 150, 20))
plt.title('Count of X_test Categories')
plt.show()
code
106196903/cell_4
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
code
106196903/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
sns.countplot(df['category'])
plt.grid(True, axis='y')
plt.yticks(np.arange(0, 550, 50))
plt.title('Count of Categories')
plt.show()
code
106196903/cell_40
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
y_train = x_train['category']
x_train = x_train['text']
le = LabelEncoder()
le.fit(y_train)
le.classes_
code
106196903/cell_29
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') from nltk.tokenize import word_tokenize import string def remove_stop_words(data): cleaned_data = [] for sentences in data: tokenized = word_tokenize(sentences, 'english') new_sentence = [token for token in tokenized if token not in stopwords.words('english')] sent = '' for text in new_sentence: sent += text + ' ' cleaned_data.append(sent) return np.array(cleaned_data) cleaned_x_train = remove_stop_words(removed_punch_x_train) cleaned_x_test = remove_stop_words(removed_punch_x_test) (cleaned_x_train.shape, cleaned_x_test.shape) text_vizual('tech', cleaned_x_train)
code
106196903/cell_39
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
y_train = x_train['category']
x_train = x_train['text']
le = LabelEncoder()
le.fit(y_train)
code
106196903/cell_48
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from tensorflow.keras.preprocessing.text import Tokenizer from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') from nltk.tokenize import word_tokenize import string def remove_stop_words(data): cleaned_data = [] for sentences in data: tokenized = word_tokenize(sentences, 'english') new_sentence = [token for token in tokenized if token not in stopwords.words('english')] sent = '' for text in new_sentence: sent += text + ' ' cleaned_data.append(sent) return np.array(cleaned_data) cleaned_x_train = remove_stop_words(removed_punch_x_train) cleaned_x_test = remove_stop_words(removed_punch_x_test) (cleaned_x_train.shape, cleaned_x_test.shape) tokenizer = Tokenizer() allDocs = list(cleaned_x_train) + list(cleaned_x_test) tokenizer.fit_on_texts(allDocs) tokenizer.document_count document_count = tokenizer.document_count vocab_size = len(tokenizer.word_index) print(f'document_count: {document_count}\nvocab_size: {vocab_size}')
code
106196903/cell_2
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import nltk
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
import plotly_express as px
from sklearn.model_selection import train_test_split
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from tensorflow.keras.utils import to_categorical
from nltk.corpus import stopwords

nltk.download('stopwords')
filterwarnings('ignore')
code
106196903/cell_19
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) print(f'train: {removed_punch_x_train.shape},\ntest:, {removed_punch_x_test.shape}')
code
106196903/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
code
106196903/cell_49
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from tensorflow.keras.preprocessing.text import Tokenizer from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') from nltk.tokenize import word_tokenize import string def remove_stop_words(data): cleaned_data = [] for sentences in data: tokenized = word_tokenize(sentences, 'english') new_sentence = [token for token in tokenized if token not in stopwords.words('english')] sent = '' for text in new_sentence: sent += text + ' ' cleaned_data.append(sent) return np.array(cleaned_data) cleaned_x_train = remove_stop_words(removed_punch_x_train) cleaned_x_test = remove_stop_words(removed_punch_x_test) (cleaned_x_train.shape, cleaned_x_test.shape) tokenizer = Tokenizer() allDocs = list(cleaned_x_train) + list(cleaned_x_test) tokenizer.fit_on_texts(allDocs) tokenizer.document_count document_count = tokenizer.document_count vocab_size = len(tokenizer.word_index) allDocs_sequence = tokenizer.texts_to_sequences(allDocs) max_length = max([len(x) for x in allDocs_sequence]) max_length
code
106196903/cell_18
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) (x_test[1664][:350], '........................', removed_punch_x_test[0][:350])
code
106196903/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
plt.yticks(np.arange(0, 550, 50))
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
sns.countplot(x_train['category'])
plt.grid(True, axis='y')
plt.yticks(np.arange(0, 550, 50))
plt.title('Count of X_train Categories')
plt.show()
code
106196903/cell_15
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape)
code
106196903/cell_16
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape)
code
106196903/cell_47
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from tensorflow.keras.preprocessing.text import Tokenizer from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') from nltk.tokenize import word_tokenize import string def remove_stop_words(data): cleaned_data = [] for sentences in data: tokenized = word_tokenize(sentences, 'english') new_sentence = [token for token in tokenized if token not in stopwords.words('english')] sent = '' for text in new_sentence: sent += text + ' ' cleaned_data.append(sent) return np.array(cleaned_data) cleaned_x_train = remove_stop_words(removed_punch_x_train) cleaned_x_test = remove_stop_words(removed_punch_x_test) (cleaned_x_train.shape, cleaned_x_test.shape) tokenizer = Tokenizer() allDocs = list(cleaned_x_train) + list(cleaned_x_test) tokenizer.fit_on_texts(allDocs) tokenizer.document_count
code
106196903/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
code
106196903/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) x_test
code
106196903/cell_31
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') text_vizual('tech', removed_punch_x_train)
code
106196903/cell_22
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') text_vizual('business', removed_punch_x_train)
code
106196903/cell_27
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from sklearn.model_selection import train_test_split from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import seaborn as sns import string import string import tqdm df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv') df df.isna().sum() plt.yticks(np.arange(0, 550, 50)) x_train, x_test = train_test_split(df, random_state=1, test_size=0.2) (x_train.shape, x_test.shape) plt.yticks(np.arange(0, 550, 50)) plt.yticks(np.arange(0, 150, 20)) y_train = x_train['category'] x_train = x_train['text'] y_test = x_test['category'] x_test = x_test['text'] import re import string def remove_punch(data): cleaned_data = [] for sentences in data: new_sentence = sentences.translate(str.maketrans('', '', string.punctuation)) new_sentence = re.sub('[\\n\\t]*', '', new_sentence) new_sentence = re.sub(' ', '', new_sentence) cleaned_data.append(new_sentence) return np.array(cleaned_data) removed_punch_x_train = remove_punch(x_train) (x_train.shape, removed_punch_x_train.shape) removed_punch_x_test = remove_punch(x_test) (x_test.shape, removed_punch_x_test.shape) import tqdm def text_vizual(category, data_train): text = '' for txt in tqdm.tqdm(data_train[y_train == f'{category}']): text += txt wordcloud_spam = WordCloud(background_color='white').generate(text) plt.axis('off') from nltk.tokenize import word_tokenize import string def remove_stop_words(data): cleaned_data = [] for sentences in data: tokenized = word_tokenize(sentences, 'english') new_sentence = [token for token in tokenized if token not in stopwords.words('english')] sent = '' for text in new_sentence: sent += text + ' ' cleaned_data.append(sent) return np.array(cleaned_data) cleaned_x_train = remove_stop_words(removed_punch_x_train) cleaned_x_test = remove_stop_words(removed_punch_x_test) (cleaned_x_train.shape, cleaned_x_test.shape)
code
106196903/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/bbc-fulltext-and-category/bbc-text.csv')
df
df.isna().sum()
x_train, x_test = train_test_split(df, random_state=1, test_size=0.2)
(x_train.shape, x_test.shape)
y_train = x_train['category']
x_train = x_train['text']
x_train
code
73064882/cell_4
[ "text_html_output_2.png" ]
import pandas as pd
import plotly.express as px

glass = pd.read_csv('/kaggle/input/glass/glass.csv')
fig = px.scatter(glass, x='Mg', y='Fe', color='Type', color_continuous_scale='portland')
fig.show()
code
73064882/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
glass = pd.read_csv('/kaggle/input/glass/glass.csv')
glass
code
73064882/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.subplots import make_subplots
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go

glass = pd.read_csv('/kaggle/input/glass/glass.csv')
fig = px.scatter(glass, x="Mg", y="Fe", color='Type', color_continuous_scale='portland')
fig.show()

lda_model = LinearDiscriminantAnalysis()
lda_model.fit(X, y)
fig = make_subplots(rows=1, cols=1)
fig.add_trace(go.Contour(x=X['Mg'], y=X['Fe'], z=lda_model.predict(X), showscale=False, opacity=0.4, colorscale='portland'), row=1, col=1)
fig.add_trace(go.Scatter(x=X['Mg'], y=X['Fe'], text=y, mode='markers', marker_symbol=y, marker=dict(color=y, colorscale='portland')), row=1, col=1)
fig.update_layout(showlegend=False)
fig.show()
code
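The cell above calls lda_model.fit(X, y) without defining X or y anywhere in this dump, and its recorded output is a stderr screenshot, consistent with those names being missing. A plausible definition, inferred only from the columns the contour and scatter traces use (a hypothetical reconstruction, not the notebook's actual code):

# Hypothetical reconstruction of the features and labels for the glass LDA fit.
X = glass[['Mg', 'Fe']]  # the two features the contour/scatter traces reference
y = glass['Type']        # the class label used for colouring and marker symbols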
73089893/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum()
code
73089893/cell_13
[ "text_html_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test.head()
code
73089893/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() print('List of Numerical Columns :\n', nums, '\n') print('List of Categorical Columns :\n', cats, '\n')
code
73089893/cell_30
[ "text_html_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') train[nums].describe()
code
73089893/cell_44
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import matplotlib.pyplot as plt import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import seaborn as sns import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') for i in range(0, len(nums)): plt.tight_layout() for i in range(0, len(nums)): plt.tight_layout() for col in cats: plt.figure(figsize=(20, 5)) plt.subplot(141) ax = sns.countplot(y=train[col], order=train[col].value_counts().index) for p in ax.patches: value = format(p.get_width(), '.0f') x = p.get_x() + 10 + p.get_width() y = p.get_y() + p.get_height() / 2 + 0.05 ax.annotate(value, (x, y), size=14) plt.xlabel('') plt.ylabel('') plt.xticks([], []) plt.title(col, size=16, weight='bold') plt.tight_layout() plt.subplot(142) train[col].value_counts().plot.pie(autopct='%1.1f%%', textprops={'fontsize': 12}) plt.title('Percentage of ' + col, size=16, weight='bold') plt.ylabel('')
code
73089893/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum()
code
73089893/cell_39
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import seaborn as sns import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') for i in range(0, len(nums)): plt.tight_layout() plt.figure(figsize=(20, 15)) for i in range(0, len(nums)): plt.subplot(3, len(nums) / 3, i + 1) sns.boxplot(y=train[nums[i]]) plt.tight_layout()
code
73089893/cell_41
[ "text_plain_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') for col in nums: Q1 = train[col].quantile(0.25) Q3 = train[col].quantile(0.75) IQR = Q3 - Q1 nilai_min = train[col].min() nilai_max = train[col].max() lower_lim = Q1 - 1.5 * IQR upper_lim = Q3 + 1.5 * IQR if nilai_min < lower_lim: print('Low outlier is found in column', col, '<', lower_lim, '\n') print('Total of Low Outlier in column', col, ':', len(list(train[train[col] < lower_lim].index)), '\n') elif nilai_max > upper_lim: print('High outlier is found in column', col, '>', upper_lim, '\n') print('Total of High Outlier in column', col, ':', len(list(train[train[col] > upper_lim].index)), '\n') else: print('Outlier is not found in column', col, '\n')
code
73089893/cell_2
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73089893/cell_32
[ "text_html_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') train[cats].describe()
code
73089893/cell_17
[ "text_html_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.info()
code
73089893/cell_35
[ "text_plain_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') len(nums)
code
73089893/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') train.head()
code
73089893/cell_36
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.style as style import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.io as pio import seaborn as sns import warnings import pandas as pd pd.set_option('display.max_columns', None) import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import seaborn as sns style.use('fivethirtyeight') import plotly.express as px import plotly.graph_objs as go import plotly.offline as py import plotly.io as pio pio.renderers.default = 'iframe' import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer, RobustScaler, OrdinalEncoder from scipy.stats import chi2_contingency from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor from xgboost import XGBRegressor from sklearn.model_selection import cross_val_score, StratifiedKFold from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') test['target'] = 0 test['train_or_test'] = 'ts' train['train_or_test'] = 'tr' combine = pd.concat([train, test]) combine.duplicated().sum() combine.duplicated(subset='id').sum() nums = combine.select_dtypes(exclude='object').keys().tolist() cats = combine.select_dtypes(include='object').keys().tolist() nums.remove('id') cats.remove('train_or_test') plt.figure(figsize=(20, 13)) for i in range(0, len(nums)): plt.subplot(3, len(nums) / 3, i + 1) sns.histplot(train[nums[i]], kde=True) plt.tight_layout()
code
17102352/cell_13
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import os import pandas as pd import random # Randomly select a filename for viewing import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import random from keras.preprocessing.image import load_img from PIL import Image from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.optimizers import RMSprop from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split main_folder = '../input' input_file_train = 'train/train' input_file_test = 'test1/test1' filenames = os.listdir(os.path.join(main_folder, input_file_train)) categories = [] for file in filenames: category = file.split('.')[0].lower() categories.append(category) dataframe = pd.DataFrame({'filename': filenames, 'categories': categories}) sample = random.choice(filenames) from PIL import Image im = Image.open(os.path.join(main_folder, input_file_train, sample)) im.size random_seed = 10 batch_size = 100 image_height, image_width, image_channels = (128, 128, 3) train_df, val_df = train_test_split(dataframe, test_size=0.2, random_state=random_seed) train_df.reset_index(inplace=True, drop=True) val_df.reset_index(inplace=True, drop=True) train_datagen = ImageDataGenerator(rotation_range=15, rescale=1.0 / 255, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, shear_range=0.2, zoom_range=0.2) train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) val_datagen = ImageDataGenerator(rescale=1.0 / 255) val_generator = val_datagen.flow_from_dataframe(dataframe=val_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) earlystop = EarlyStopping(monitor='val_acc', patience=10) reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.75, verbose=1, patience=2, min_lr=1e-05) checkpoint = ModelCheckpoint(filepath=os.path.join(main_folder, 'best weights.h5'), monitor='val_acc', save_best_only=True) callbacks = [earlystop, reduce_lr] test_files = os.listdir(os.path.join(main_folder, input_file_test)) test_df = pd.DataFrame({'filename': test_files}) test_datagen = ImageDataGenerator(rescale=1.0 / 255) test_generator = test_datagen.flow_from_dataframe(dataframe=test_df, directory=os.path.join(main_folder, input_file_test), x_col='filename', y_col=None, target_size=(image_height, image_width), class_mode=None, batch_size=batch_size, shuffle=False)
code
17102352/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import random from keras.preprocessing.image import load_img from PIL import Image from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.optimizers import RMSprop from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split main_folder = '../input' input_file_train = 'train/train' input_file_test = 'test1/test1'
code
17102352/cell_7
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.models import Sequential dropout_rate = 0.25 fc_units_1 = 512 fc_units_2 = 256 output_units = 2 epochs = 50 model = Sequential() model.add(Conv2D(filters=32, kernel_size=3, strides=1, padding='same', input_shape=(image_height, image_width, image_channels), data_format='channels_last')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(filters=64, kernel_size=3, strides=1, padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(filters=128, kernel_size=3, strides=1, padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Flatten()) model.add(Dense(units=fc_units_1, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=fc_units_2, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=output_units, activation='softmax')) model.summary()
code
17102352/cell_3
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image import matplotlib.pyplot as plt import os import pandas as pd import random # Randomly select a filename for viewing import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import random from keras.preprocessing.image import load_img from PIL import Image from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.optimizers import RMSprop from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split main_folder = '../input' input_file_train = 'train/train' input_file_test = 'test1/test1' filenames = os.listdir(os.path.join(main_folder, input_file_train)) categories = [] for file in filenames: category = file.split('.')[0].lower() categories.append(category) dataframe = pd.DataFrame({'filename': filenames, 'categories': categories}) sample = random.choice(filenames) plt.imshow(load_img(os.path.join(main_folder, input_file_train, sample))) from PIL import Image im = Image.open(os.path.join(main_folder, input_file_train, sample)) im.size
code
17102352/cell_10
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image from PIL import Image from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.models import Sequential from keras.optimizers import RMSprop from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import os import pandas as pd import random # Randomly select a filename for viewing import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import random from keras.preprocessing.image import load_img from PIL import Image from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.optimizers import RMSprop from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split main_folder = '../input' input_file_train = 'train/train' input_file_test = 'test1/test1' filenames = os.listdir(os.path.join(main_folder, input_file_train)) categories = [] for file in filenames: category = file.split('.')[0].lower() categories.append(category) dataframe = pd.DataFrame({'filename': filenames, 'categories': categories}) sample = random.choice(filenames) from PIL import Image im = Image.open(os.path.join(main_folder, input_file_train, sample)) im.size random_seed = 10 batch_size = 100 image_height, image_width, image_channels = (128, 128, 3) train_df, val_df = train_test_split(dataframe, test_size=0.2, random_state=random_seed) train_df.reset_index(inplace=True, drop=True) val_df.reset_index(inplace=True, drop=True) train_datagen = ImageDataGenerator(rotation_range=15, rescale=1.0 / 255, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, shear_range=0.2, zoom_range=0.2) train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) val_datagen = ImageDataGenerator(rescale=1.0 / 255) val_generator = val_datagen.flow_from_dataframe(dataframe=val_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) dropout_rate = 0.25 fc_units_1 = 512 fc_units_2 = 256 output_units = 2 epochs = 50 model = Sequential() model.add(Conv2D(filters=32, kernel_size=3, strides=1, padding='same', input_shape=(image_height, image_width, image_channels), data_format='channels_last')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(filters=64, kernel_size=3, strides=1, padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(filters=128, kernel_size=3, strides=1, padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Flatten()) model.add(Dense(units=fc_units_1, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=fc_units_2, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=output_units, activation='softmax')) model.summary() optimizer = RMSprop() model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) earlystop = EarlyStopping(monitor='val_acc', patience=10) reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.75, verbose=1, patience=2, min_lr=1e-05) checkpoint = ModelCheckpoint(filepath=os.path.join(main_folder, 'best weights.h5'), monitor='val_acc', save_best_only=True) callbacks = [earlystop, reduce_lr] history = model.fit_generator(generator=train_generator, steps_per_epoch=len(train_df) // batch_size, epochs=epochs, callbacks=callbacks, validation_data=val_generator, validation_steps=len(val_df) // batch_size)
code
17102352/cell_12
[ "text_plain_output_1.png" ]
from PIL import Image from PIL import Image from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.models import Sequential from keras.optimizers import RMSprop from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random # Randomly select a filename for viewing import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import random from keras.preprocessing.image import load_img from PIL import Image from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.optimizers import RMSprop from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split main_folder = '../input' input_file_train = 'train/train' input_file_test = 'test1/test1' filenames = os.listdir(os.path.join(main_folder, input_file_train)) categories = [] for file in filenames: category = file.split('.')[0].lower() categories.append(category) dataframe = pd.DataFrame({'filename': filenames, 'categories': categories}) sample = random.choice(filenames) from PIL import Image im = Image.open(os.path.join(main_folder, input_file_train, sample)) im.size random_seed = 10 batch_size = 100 image_height, image_width, image_channels = (128, 128, 3) train_df, val_df = train_test_split(dataframe, test_size=0.2, random_state=random_seed) train_df.reset_index(inplace=True, drop=True) val_df.reset_index(inplace=True, drop=True) train_datagen = ImageDataGenerator(rotation_range=15, rescale=1.0 / 255, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, shear_range=0.2, zoom_range=0.2) train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) val_datagen = ImageDataGenerator(rescale=1.0 / 255) val_generator = val_datagen.flow_from_dataframe(dataframe=val_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) dropout_rate = 0.25 fc_units_1 = 512 fc_units_2 = 256 output_units = 2 epochs = 50 model = Sequential() model.add(Conv2D(filters=32, kernel_size=3, strides=1, padding='same', input_shape=(image_height, image_width, image_channels), data_format='channels_last')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(filters=64, kernel_size=3, strides=1, padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Conv2D(filters=128, kernel_size=3, strides=1, padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D()) model.add(Dropout(rate=dropout_rate)) model.add(Flatten()) model.add(Dense(units=fc_units_1, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=fc_units_2, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=output_units, activation='softmax')) model.summary() optimizer = RMSprop() model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) earlystop = EarlyStopping(monitor='val_acc', patience=10) reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.75, verbose=1, patience=2, min_lr=1e-05) checkpoint = ModelCheckpoint(filepath=os.path.join(main_folder, 'best weights.h5'), monitor='val_acc', save_best_only=True) callbacks = [earlystop, reduce_lr] history = model.fit_generator(generator=train_generator, steps_per_epoch=len(train_df) // batch_size, epochs=epochs, callbacks=callbacks, validation_data=val_generator, validation_steps=len(val_df) // batch_size) fig, ax = plt.subplots(2, 1, figsize=(12, 12)) ax[0].plot(history.history['loss'], color='b', label='Training loss') ax[0].plot(history.history['val_loss'], color='r', label='Validation loss') ax[0].set_xticks(np.arange(1, epochs, 1)) ax[0].set_yticks(np.arange(0, 2.3, 0.2)) ax[0].legend(loc='best', shadow=True) ax[1].plot(history.history['acc'], color='b', label='Training accuracy') ax[1].plot(history.history['val_acc'], color='r', label='Valication accuracy') ax[1].set_xticks(np.arange(1, epochs)) ax[1].legend(loc='best', shadow=True) plt.tight_layout() plt.show()
code
17102352/cell_5
[ "image_output_1.png" ]
from PIL import Image from PIL import Image from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import os import pandas as pd import random # Randomly select a filename for viewing import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import random from keras.preprocessing.image import load_img from PIL import Image from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization from keras.optimizers import RMSprop from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split main_folder = '../input' input_file_train = 'train/train' input_file_test = 'test1/test1' filenames = os.listdir(os.path.join(main_folder, input_file_train)) categories = [] for file in filenames: category = file.split('.')[0].lower() categories.append(category) dataframe = pd.DataFrame({'filename': filenames, 'categories': categories}) sample = random.choice(filenames) from PIL import Image im = Image.open(os.path.join(main_folder, input_file_train, sample)) im.size random_seed = 10 batch_size = 100 image_height, image_width, image_channels = (128, 128, 3) train_df, val_df = train_test_split(dataframe, test_size=0.2, random_state=random_seed) train_df.reset_index(inplace=True, drop=True) val_df.reset_index(inplace=True, drop=True) train_datagen = ImageDataGenerator(rotation_range=15, rescale=1.0 / 255, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, shear_range=0.2, zoom_range=0.2) train_generator = train_datagen.flow_from_dataframe(dataframe=train_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size) val_datagen = ImageDataGenerator(rescale=1.0 / 255) val_generator = val_datagen.flow_from_dataframe(dataframe=val_df, directory=os.path.join(main_folder, input_file_train), x_col='filename', y_col='categories', target_size=(image_height, image_width), class_mode='categorical', batch_size=batch_size)
code
2037446/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import Imputer num_pipeline = Pipeline([('imputer', Imputer(strategy='median')), ('attributes_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler())]) housing_num_tr = num_pipeline.fit_transform(housing_num)
code
2037446/cell_13
[ "text_html_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing.plot(kind='scatter', x='median_income', y='median_house_value', alpha=0.1)
code
2037446/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['ocean_proximity'].value_counts()
code
2037446/cell_23
[ "text_plain_output_1.png" ]
from sklearn.base import BaseEstimator, TransformerMixin from sklearn.linear_model import LinearRegression from sklearn.model_selection import StratifiedShuffleSplit from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing = strat_train_set.drop('median_house_value', axis=1) housing_labels = strat_train_set['median_house_value'].copy() median = housing['total_bedrooms'].median() housing['total_bedrooms'].fillna(median) from sklearn.base import BaseEstimator, TransformerMixin rooms_ix, bedroom_ix, population_ix, household_ix = (3, 4, 5, 6) class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room=True): self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self def transform(self, X, y=None): rooms_per_household = X[:, rooms_ix] / X[:, household_ix] population_per_household = X[:, population_ix] / X[:, household_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedroom_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import Imputer num_pipeline = Pipeline([('imputer', Imputer(strategy='median')), ('attributes_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler())]) housing_num_tr = num_pipeline.fit_transform(housing_num) from sklearn.pipeline import FeatureUnion class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names].values class CustomBinarizer(BaseEstimator, TransformerMixin): def fit(self, X, y=None, **fit_params): return self def transform(self, X): return LabelBinarizer().fit(X).transform(X) num_attribs = list(housing_num) cat_attribs = ['ocean_proximity'] num_pipeline = Pipeline([('selector', DataFrameSelector(num_attribs)), ('imputer', Imputer(strategy='median')), ('attributes_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler())]) cat_pipeline = Pipeline([('selector', DataFrameSelector(cat_attribs)), ('custom_binarizer', CustomBinarizer())]) full_pipeline = FeatureUnion(transformer_list=[('num_pipeline', num_pipeline), ('cat_pipeline', cat_pipeline)]) housing_prepared = full_pipeline.fit_transform(housing) housing_prepared[0, :] from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(housing_prepared, housing_labels) some_data = housing.iloc[:5] some_labels = housing_labels.iloc[:5] some_data_prepared = full_pipeline.fit_transform(some_data) some_data_prepared
code
2037446/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt housing.hist(bins=50, figsize=(20, 15)) plt.show()
code
2037446/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing.head()
code
2037446/cell_11
[ "text_plain_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False)
code
2037446/cell_19
[ "text_plain_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing = strat_train_set.drop('median_house_value', axis=1) housing_labels = strat_train_set['median_house_value'].copy() median = housing['total_bedrooms'].median() housing['total_bedrooms'].fillna(median) from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() housing_cat = housing['ocean_proximity'] housing_cat_encoded = encoder.fit_transform(housing_cat) housing_cat_encoded housing_cat.shape from sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder() housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1)) housing_cat_1hot housing_cat_1hot.toarray() from sklearn.preprocessing import LabelBinarizer encoder = LabelBinarizer() housing_cat_1hot = encoder.fit_transform(housing_cat) housing_cat_1hot
code
2037446/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2037446/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') from sklearn.model_selection import train_test_split train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42) print(len(train_set), 'train +', len(test_set), 'test')
code
2037446/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing = strat_train_set.drop('median_house_value', axis=1) housing_labels = strat_train_set['median_house_value'].copy() median = housing['total_bedrooms'].median() housing['total_bedrooms'].fillna(median) from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() housing_cat = housing['ocean_proximity'] housing_cat_encoded = encoder.fit_transform(housing_cat) housing_cat_encoded housing_cat.shape from sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder() housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1)) housing_cat_1hot housing_cat_1hot.toarray()
code
2037446/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] strat_train_set.head()
code
2037446/cell_16
[ "text_plain_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing = strat_train_set.drop('median_house_value', axis=1) housing_labels = strat_train_set['median_house_value'].copy() median = housing['total_bedrooms'].median() housing['total_bedrooms'].fillna(median) housing.info() print(median)
code
2037446/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing.info()
code
2037446/cell_17
[ "text_html_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing = strat_train_set.drop('median_house_value', axis=1) housing_labels = strat_train_set['median_house_value'].copy() median = housing['total_bedrooms'].median() housing['total_bedrooms'].fillna(median) from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() housing_cat = housing['ocean_proximity'] housing_cat_encoded = encoder.fit_transform(housing_cat) housing_cat_encoded housing_cat.shape
code
2037446/cell_14
[ "image_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False)
code
2037446/cell_22
[ "text_plain_output_1.png" ]
from sklearn.base import BaseEstimator, TransformerMixin from sklearn.model_selection import StratifiedShuffleSplit from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing['rooms_per_household'] = housing['total_rooms'] / housing['households'] housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms'] housing['population_per_household'] = housing['population'] / housing['households'] corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing = strat_train_set.drop('median_house_value', axis=1) housing_labels = strat_train_set['median_house_value'].copy() median = housing['total_bedrooms'].median() housing['total_bedrooms'].fillna(median) from sklearn.base import BaseEstimator, TransformerMixin rooms_ix, bedroom_ix, population_ix, household_ix = (3, 4, 5, 6) class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room=True): self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self def transform(self, X, y=None): rooms_per_household = X[:, rooms_ix] / X[:, household_ix] population_per_household = X[:, population_ix] / X[:, household_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedroom_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import Imputer num_pipeline = Pipeline([('imputer', Imputer(strategy='median')), ('attributes_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler())]) housing_num_tr = num_pipeline.fit_transform(housing_num) from sklearn.pipeline import FeatureUnion class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names].values class CustomBinarizer(BaseEstimator, TransformerMixin): def fit(self, X, y=None, **fit_params): return self def transform(self, X): return LabelBinarizer().fit(X).transform(X) num_attribs = list(housing_num) cat_attribs = ['ocean_proximity'] num_pipeline = Pipeline([('selector', DataFrameSelector(num_attribs)), ('imputer', Imputer(strategy='median')), ('attributes_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler())]) cat_pipeline = Pipeline([('selector', DataFrameSelector(cat_attribs)), ('custom_binarizer', CustomBinarizer())]) full_pipeline = FeatureUnion(transformer_list=[('num_pipeline', num_pipeline), ('cat_pipeline', cat_pipeline)]) housing_prepared = full_pipeline.fit_transform(housing) housing_prepared[0, :]
code
2037446/cell_10
[ "text_html_output_1.png" ]
from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4, s=housing['population'] / 100, label='population', c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True) plt.legend()
code
2037446/cell_12
[ "text_plain_output_1.png" ]
from pandas.tools.plotting import scatter_matrix from sklearn.model_selection import StratifiedShuffleSplit import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing['income_cat'] = np.ceil(housing['median_income'] / 1.5) housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing['income_cat']): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] housing = strat_train_set.copy() corr_matrix = housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) from pandas.tools.plotting import scatter_matrix attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age'] scatter_matrix(housing[attributes], figsize=(12, 8))
code
2037446/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) housing = pd.read_csv('../input/housing.csv') housing.describe()
code
17097145/cell_13
[ "text_plain_output_1.png" ]
from PIL import Image import cv2 as cv import numpy as np # linear algebra import os import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torchvision.transforms as transforms train_df = pd.read_csv('../input/train.csv') test_df = pd.read_csv('../input/test.csv') target_dict = {0: 'No DR(Diabetic Retinopathy)', 1: 'Mild', 2: 'Moderate', 3: 'Severe', 4: 'Proliferative DR(Diabetic Retinopathy)'} class MyDataset(torch.utils.data.Dataset): def __init__(self, dir_img_path=None, dir_csv_path=None, Usage=None, transform=None): super().__init__() '\n Usage:\n ' self.DIR_PATH = '../input' self.df = pd.read_csv(dir_csv_path) self.dir_img_path = dir_img_path self.images = self.loadImage(self.dir_img_path) self.Usage = Usage self.transform = transform def loadImage(self, path): return os.listdir(path) def __getitem__(self, pos): obj = self.df.loc[pos] img_id = obj['id_code'] if self.Usage == 'Training': label = obj['diagnosis'] img_id = '{}.png'.format(img_id) img = cv.imread(os.path.join(self.dir_img_path, img_id)) img = Image.fromarray(img) if self.transform: img = self.transform(img) if self.Usage == 'Training': return (img, label) return (img, obj['id_code']) def change_type(self, img, label): return (img.astype(np.float32), label.astype(np.long)) def read(self, image): return cv.imread(image) def reshape(self, image): return cv.resize(image, (244, 244)) def __len__(self): return len(self.df) transformation = transforms.Compose([transforms.Resize((224, 224)), transforms.ColorJitter(0.1), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) myDataset = MyDataset('../input/train_images', '../input/train.csv', transform=transformation, Usage='Training') testData = MyDataset('../input/test_images', '../input/test.csv', transform=transformation, Usage='Testing')
code
17097145/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
from sklearn.metrics import accuracy_score
import cv2 as cv
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.sampler as sampler
import torchvision.transforms as transforms
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
target_dict = {0: 'No DR(Diabetic Retinopathy)', 1: 'Mild', 2: 'Moderate', 3: 'Severe', 4: 'Proliferative DR(Diabetic Retinopathy)'}
class MyDataset(torch.utils.data.Dataset):
    def __init__(self, dir_img_path=None, dir_csv_path=None, Usage=None, transform=None):
        super().__init__()
        '\n Usage:\n '
        self.DIR_PATH = '../input'
        self.df = pd.read_csv(dir_csv_path)
        self.dir_img_path = dir_img_path
        self.images = self.loadImage(self.dir_img_path)
        self.Usage = Usage
        self.transform = transform
    def loadImage(self, path):
        return os.listdir(path)
    def __getitem__(self, pos):
        obj = self.df.loc[pos]
        img_id = obj['id_code']
        if self.Usage == 'Training':
            label = obj['diagnosis']
        img_id = '{}.png'.format(img_id)
        img = cv.imread(os.path.join(self.dir_img_path, img_id))
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)
        if self.Usage == 'Training':
            return (img, label)
        return (img, obj['id_code'])
    def change_type(self, img, label):
        return (img.astype(np.float32), label.astype(np.long))
    def read(self, image):
        return cv.imread(image)
    def reshape(self, image):
        return cv.resize(image, (244, 244))
    def __len__(self):
        return len(self.df)
transformation = transforms.Compose([transforms.Resize((224, 224)), transforms.ColorJitter(0.1), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
myDataset = MyDataset('../input/train_images', '../input/train.csv', transform=transformation, Usage='Training')
test_size = 0.2
samples = len(myDataset)
indices = list(range(samples))
np.random.shuffle(indices)
train_len = int(np.floor(samples * test_size))
train_idx, valid_idx = (indices[train_len:], indices[:train_len])
train_sampler = sampler.SubsetRandomSampler(train_idx)
valid_sampler = sampler.SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(myDataset, sampler=train_sampler, batch_size=32, shuffle=False)
test_loader = torch.utils.data.DataLoader(myDataset, sampler=valid_sampler, batch_size=32, shuffle=False)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, padding=3)
        self.conv1_1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=7, padding=3)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, padding=3)
        self.conv2_1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=7, padding=3)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, padding=2)
        self.conv3_1 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, padding=2)
        self.conv4_1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=5, padding=2)
        self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, padding=2)
        self.conv5_1 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, padding=2)
        self.fc1 = nn.Linear(in_features=512 * 7 * 7, out_features=1024)
        self.fc2 = nn.Linear(in_features=1024, out_features=1024)
        self.fc3 = nn.Linear(in_features=1024, out_features=512)
        self.out = nn.Linear(in_features=1024, out_features=5)
        self.dropout = nn.Dropout(0.5)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv1_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv2_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv3_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv4_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv5_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = x.reshape(-1, 512 * 7 * 7)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.log_softmax(self.out(x), dim=1)
        return x
net = Net().to(device)
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
lr_step = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.3)
for epoch in range(20):
    running_loss = 0.0
    valid_loss = []
    valid_auc = []
    train_loss = []
    train_auc = []
    net.train()
    for i, (image, label) in enumerate(train_loader):
        image, label = (image.to(device), label.to(device))
        optimizer.zero_grad()
        output = net(image)
        loss = criterion(output, label)
        _, output = torch.max(output, 1)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        train_loss.append(loss.item())
        train_auc.append(accuracy_score(torch.Tensor.cpu(output), torch.Tensor.cpu(label)))
        if i % 10 == 9:
            running_loss = 0.0
    net.eval()
    for i, (image, label) in enumerate(test_loader):
        image, label = (image.to(device), label.to(device))
        output = net(image)
        loss = criterion(output, label)
        _, output = torch.max(output, 1)
        valid_loss.append(loss.item())
        valid_auc.append(accuracy_score(output.cpu().detach().numpy(), label.cpu().detach().numpy()))
print('The state dict keys: \n\n', net.state_dict().keys())
code
17097145/cell_4
[ "text_plain_output_1.png" ]
from PIL import Image
import cv2 as cv
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torchvision.transforms as transforms
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
target_dict = {0: 'No DR(Diabetic Retinopathy)', 1: 'Mild', 2: 'Moderate', 3: 'Severe', 4: 'Proliferative DR(Diabetic Retinopathy)'}
class MyDataset(torch.utils.data.Dataset):
    def __init__(self, dir_img_path=None, dir_csv_path=None, Usage=None, transform=None):
        super().__init__()
        '\n Usage:\n '
        self.DIR_PATH = '../input'
        self.df = pd.read_csv(dir_csv_path)
        self.dir_img_path = dir_img_path
        self.images = self.loadImage(self.dir_img_path)
        self.Usage = Usage
        self.transform = transform
        print('{} Data length of image {}:'.format(Usage, len(self.images)))
        print('{} Data length of csv file {}:'.format(Usage, len(self.df)))
    def loadImage(self, path):
        return os.listdir(path)
    def __getitem__(self, pos):
        obj = self.df.loc[pos]
        img_id = obj['id_code']
        if self.Usage == 'Training':
            label = obj['diagnosis']
        img_id = '{}.png'.format(img_id)
        img = cv.imread(os.path.join(self.dir_img_path, img_id))
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)
        if self.Usage == 'Training':
            return (img, label)
        return (img, obj['id_code'])
    def change_type(self, img, label):
        return (img.astype(np.float32), label.astype(np.long))
    def read(self, image):
        return cv.imread(image)
    def reshape(self, image):
        return cv.resize(image, (244, 244))
    def __len__(self):
        return len(self.df)
transformation = transforms.Compose([transforms.Resize((224, 224)), transforms.ColorJitter(0.1), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
myDataset = MyDataset('../input/train_images', '../input/train.csv', transform=transformation, Usage='Training')
code
17097145/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import cv2 as cv
import os
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import torch.utils.data.dataloader as DataLoader
import torch.utils.data.sampler as sampler
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import os
print(os.listdir('../input'))
code
17097145/cell_8
[ "text_plain_output_1.png" ]
from PIL import Image
from sklearn.metrics import accuracy_score
import cv2 as cv
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.sampler as sampler
import torchvision.transforms as transforms
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
target_dict = {0: 'No DR(Diabetic Retinopathy)', 1: 'Mild', 2: 'Moderate', 3: 'Severe', 4: 'Proliferative DR(Diabetic Retinopathy)'}
class MyDataset(torch.utils.data.Dataset):
    def __init__(self, dir_img_path=None, dir_csv_path=None, Usage=None, transform=None):
        super().__init__()
        '\n Usage:\n '
        self.DIR_PATH = '../input'
        self.df = pd.read_csv(dir_csv_path)
        self.dir_img_path = dir_img_path
        self.images = self.loadImage(self.dir_img_path)
        self.Usage = Usage
        self.transform = transform
    def loadImage(self, path):
        return os.listdir(path)
    def __getitem__(self, pos):
        obj = self.df.loc[pos]
        img_id = obj['id_code']
        if self.Usage == 'Training':
            label = obj['diagnosis']
        img_id = '{}.png'.format(img_id)
        img = cv.imread(os.path.join(self.dir_img_path, img_id))
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)
        if self.Usage == 'Training':
            return (img, label)
        return (img, obj['id_code'])
    def change_type(self, img, label):
        return (img.astype(np.float32), label.astype(np.long))
    def read(self, image):
        return cv.imread(image)
    def reshape(self, image):
        return cv.resize(image, (244, 244))
    def __len__(self):
        return len(self.df)
transformation = transforms.Compose([transforms.Resize((224, 224)), transforms.ColorJitter(0.1), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
myDataset = MyDataset('../input/train_images', '../input/train.csv', transform=transformation, Usage='Training')
test_size = 0.2
samples = len(myDataset)
indices = list(range(samples))
np.random.shuffle(indices)
train_len = int(np.floor(samples * test_size))
train_idx, valid_idx = (indices[train_len:], indices[:train_len])
train_sampler = sampler.SubsetRandomSampler(train_idx)
valid_sampler = sampler.SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(myDataset, sampler=train_sampler, batch_size=32, shuffle=False)
test_loader = torch.utils.data.DataLoader(myDataset, sampler=valid_sampler, batch_size=32, shuffle=False)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, padding=3)
        self.conv1_1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=7, padding=3)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, padding=3)
        self.conv2_1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=7, padding=3)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, padding=2)
        self.conv3_1 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, padding=2)
        self.conv4_1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=5, padding=2)
        self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, padding=2)
        self.conv5_1 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, padding=2)
        self.fc1 = nn.Linear(in_features=512 * 7 * 7, out_features=1024)
        self.fc2 = nn.Linear(in_features=1024, out_features=1024)
        self.fc3 = nn.Linear(in_features=1024, out_features=512)
        self.out = nn.Linear(in_features=1024, out_features=5)
        self.dropout = nn.Dropout(0.5)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv1_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv2_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv3_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv4_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv5_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = x.reshape(-1, 512 * 7 * 7)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.log_softmax(self.out(x), dim=1)
        return x
net = Net().to(device)
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
lr_step = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.3)
for epoch in range(20):
    running_loss = 0.0
    valid_loss = []
    valid_auc = []
    train_loss = []
    train_auc = []
    net.train()
    for i, (image, label) in enumerate(train_loader):
        image, label = (image.to(device), label.to(device))
        optimizer.zero_grad()
        output = net(image)
        loss = criterion(output, label)
        _, output = torch.max(output, 1)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        train_loss.append(loss.item())
        train_auc.append(accuracy_score(torch.Tensor.cpu(output), torch.Tensor.cpu(label)))
        if i % 10 == 9:
            print('[%d, %5d] loss: %.5f Accuracy:%.5f' % (epoch + 1, i + 1, running_loss / 100, accuracy_score(torch.Tensor.cpu(output), torch.Tensor.cpu(label))))
            running_loss = 0.0
    net.eval()
    for i, (image, label) in enumerate(test_loader):
        image, label = (image.to(device), label.to(device))
        output = net(image)
        loss = criterion(output, label)
        _, output = torch.max(output, 1)
        valid_loss.append(loss.item())
        valid_auc.append(accuracy_score(output.cpu().detach().numpy(), label.cpu().detach().numpy()))
    print('Epoch {}, train loss: {}, train accuracy: {}\tvalid loss: {}, valid accuracy: {}'.format(epoch + 1, np.mean(train_loss), np.mean(train_auc), np.mean(valid_loss), np.mean(valid_auc)))
code
17097145/cell_10
[ "text_plain_output_1.png" ]
from PIL import Image
from sklearn.metrics import accuracy_score
import cv2 as cv
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.sampler as sampler
import torchvision.transforms as transforms
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
target_dict = {0: 'No DR(Diabetic Retinopathy)', 1: 'Mild', 2: 'Moderate', 3: 'Severe', 4: 'Proliferative DR(Diabetic Retinopathy)'}
class MyDataset(torch.utils.data.Dataset):
    def __init__(self, dir_img_path=None, dir_csv_path=None, Usage=None, transform=None):
        super().__init__()
        '\n Usage:\n '
        self.DIR_PATH = '../input'
        self.df = pd.read_csv(dir_csv_path)
        self.dir_img_path = dir_img_path
        self.images = self.loadImage(self.dir_img_path)
        self.Usage = Usage
        self.transform = transform
    def loadImage(self, path):
        return os.listdir(path)
    def __getitem__(self, pos):
        obj = self.df.loc[pos]
        img_id = obj['id_code']
        if self.Usage == 'Training':
            label = obj['diagnosis']
        img_id = '{}.png'.format(img_id)
        img = cv.imread(os.path.join(self.dir_img_path, img_id))
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)
        if self.Usage == 'Training':
            return (img, label)
        return (img, obj['id_code'])
    def change_type(self, img, label):
        return (img.astype(np.float32), label.astype(np.long))
    def read(self, image):
        return cv.imread(image)
    def reshape(self, image):
        return cv.resize(image, (244, 244))
    def __len__(self):
        return len(self.df)
transformation = transforms.Compose([transforms.Resize((224, 224)), transforms.ColorJitter(0.1), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
myDataset = MyDataset('../input/train_images', '../input/train.csv', transform=transformation, Usage='Training')
test_size = 0.2
samples = len(myDataset)
indices = list(range(samples))
np.random.shuffle(indices)
train_len = int(np.floor(samples * test_size))
train_idx, valid_idx = (indices[train_len:], indices[:train_len])
train_sampler = sampler.SubsetRandomSampler(train_idx)
valid_sampler = sampler.SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(myDataset, sampler=train_sampler, batch_size=32, shuffle=False)
test_loader = torch.utils.data.DataLoader(myDataset, sampler=valid_sampler, batch_size=32, shuffle=False)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, padding=3)
        self.conv1_1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=7, padding=3)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, padding=3)
        self.conv2_1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=7, padding=3)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, padding=2)
        self.conv3_1 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, padding=2)
        self.conv4_1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=5, padding=2)
        self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, padding=2)
        self.conv5_1 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, padding=2)
        self.fc1 = nn.Linear(in_features=512 * 7 * 7, out_features=1024)
        self.fc2 = nn.Linear(in_features=1024, out_features=1024)
        self.fc3 = nn.Linear(in_features=1024, out_features=512)
        self.out = nn.Linear(in_features=1024, out_features=5)
        self.dropout = nn.Dropout(0.5)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv1_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv2_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv3_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv4_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv5_1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = self.dropout(x)
        x = x.reshape(-1, 512 * 7 * 7)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.log_softmax(self.out(x), dim=1)
        return x
net = Net().to(device)
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
lr_step = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.3)
for epoch in range(20):
    running_loss = 0.0
    valid_loss = []
    valid_auc = []
    train_loss = []
    train_auc = []
    net.train()
    for i, (image, label) in enumerate(train_loader):
        image, label = (image.to(device), label.to(device))
        optimizer.zero_grad()
        output = net(image)
        loss = criterion(output, label)
        _, output = torch.max(output, 1)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        train_loss.append(loss.item())
        train_auc.append(accuracy_score(torch.Tensor.cpu(output), torch.Tensor.cpu(label)))
        if i % 10 == 9:
            running_loss = 0.0
    net.eval()
    for i, (image, label) in enumerate(test_loader):
        image, label = (image.to(device), label.to(device))
        output = net(image)
        loss = criterion(output, label)
        _, output = torch.max(output, 1)
        valid_loss.append(loss.item())
        valid_auc.append(accuracy_score(output.cpu().detach().numpy(), label.cpu().detach().numpy()))
checkpoint = {'model': Net(), 'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
code
17097145/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
import cv2 as cv
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.utils.data.sampler as sampler
import torchvision.transforms as transforms
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
target_dict = {0: 'No DR(Diabetic Retinopathy)', 1: 'Mild', 2: 'Moderate', 3: 'Severe', 4: 'Proliferative DR(Diabetic Retinopathy)'}
class MyDataset(torch.utils.data.Dataset):
    def __init__(self, dir_img_path=None, dir_csv_path=None, Usage=None, transform=None):
        super().__init__()
        '\n Usage:\n '
        self.DIR_PATH = '../input'
        self.df = pd.read_csv(dir_csv_path)
        self.dir_img_path = dir_img_path
        self.images = self.loadImage(self.dir_img_path)
        self.Usage = Usage
        self.transform = transform
    def loadImage(self, path):
        return os.listdir(path)
    def __getitem__(self, pos):
        obj = self.df.loc[pos]
        img_id = obj['id_code']
        if self.Usage == 'Training':
            label = obj['diagnosis']
        img_id = '{}.png'.format(img_id)
        img = cv.imread(os.path.join(self.dir_img_path, img_id))
        img = Image.fromarray(img)
        if self.transform:
            img = self.transform(img)
        if self.Usage == 'Training':
            return (img, label)
        return (img, obj['id_code'])
    def change_type(self, img, label):
        return (img.astype(np.float32), label.astype(np.long))
    def read(self, image):
        return cv.imread(image)
    def reshape(self, image):
        return cv.resize(image, (244, 244))
    def __len__(self):
        return len(self.df)
transformation = transforms.Compose([transforms.Resize((224, 224)), transforms.ColorJitter(0.1), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
myDataset = MyDataset('../input/train_images', '../input/train.csv', transform=transformation, Usage='Training')
test_size = 0.2
samples = len(myDataset)
indices = list(range(samples))
np.random.shuffle(indices)
train_len = int(np.floor(samples * test_size))
train_idx, valid_idx = (indices[train_len:], indices[:train_len])
train_sampler = sampler.SubsetRandomSampler(train_idx)
valid_sampler = sampler.SubsetRandomSampler(valid_idx)
print(len(train_sampler), len(valid_sampler))
train_loader = torch.utils.data.DataLoader(myDataset, sampler=train_sampler, batch_size=32, shuffle=False)
test_loader = torch.utils.data.DataLoader(myDataset, sampler=valid_sampler, batch_size=32, shuffle=False)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
code
306027/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3
import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
scripts = pd.read_sql_query('\n\nSELECT s.Id,\n\n cv.Title,\n\n COUNT(DISTINCT vo.Id) NumVotes,\n\n COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END) NumNonSelfVotes,\n\n CASE WHEN COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END)>0 THEN 1 ELSE 0 END HasNonSelfVotes,\n\n COUNT(DISTINCT v.Id) NumVersions,\n\n SUM(CASE WHEN r.WorkerStatus=2 THEN 1 ELSE 0 END) NumSuccessfulRuns,\n\n SUM(CASE WHEN r.WorkerStatus=3 THEN 1 ELSE 0 END) NumErroredRuns,\n\n SUM(CASE WHEN v.IsChange=1 THEN 1 ELSE 0 END) NumChangedVersions,\n\n SUM(v.LinesInsertedFromPrevious-v.LinesDeletedFromPrevious) Lines,\n\n SUM(v.LinesInsertedFromPrevious+v.LinesChangedFromPrevious) LinesAddedOrChanged,\n\n l.Name\n\nFROM Scripts s\n\nINNER JOIN ScriptVersions v ON v.ScriptId=s.Id\n\nINNER JOIN ScriptVersions cv ON s.CurrentScriptVersionId=cv.Id\n\nINNER JOIN ScriptRuns r ON r.ScriptVersionId=v.Id\n\nINNER JOIN ScriptLanguages l ON v.ScriptLanguageId=l.Id\n\nLEFT OUTER JOIN ScriptVotes vo ON vo.ScriptVersionId=v.Id\n\nWHERE r.WorkerStatus != 4\n\n AND r.WorkerStatus != 5\n\nGROUP BY s.Id,\n\n cv.Title,\n\n cv.Id,\n\n l.Name\n\nORDER BY cv.Id DESC\n\n', con)
scripts
pd.read_sql_query('\nSELECT *\nFROM ScriptLanguages\nLIMIT 100\n', con)
code
306027/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3
import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
scripts = pd.read_sql_query('\nSELECT s.Id,\n cv.Title,\n COUNT(DISTINCT vo.Id) NumVotes,\n COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END) NumNonSelfVotes,\n CASE WHEN COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END)>0 THEN 1 ELSE 0 END HasNonSelfVotes,\n COUNT(DISTINCT v.Id) NumVersions,\n SUM(CASE WHEN r.WorkerStatus=2 THEN 1 ELSE 0 END) NumSuccessfulRuns,\n SUM(CASE WHEN r.WorkerStatus=3 THEN 1 ELSE 0 END) NumErroredRuns,\n SUM(CASE WHEN v.IsChange=1 THEN 1 ELSE 0 END) NumChangedVersions,\n SUM(v.LinesInsertedFromPrevious-v.LinesDeletedFromPrevious) Lines,\n SUM(v.LinesInsertedFromPrevious+v.LinesChangedFromPrevious) LinesAddedOrChanged,\n l.Name\nFROM Scripts s\nINNER JOIN ScriptVersions v ON v.ScriptId=s.Id\nINNER JOIN ScriptVersions cv ON s.CurrentScriptVersionId=cv.Id\nINNER JOIN ScriptRuns r ON r.ScriptVersionId=v.Id\nINNER JOIN ScriptLanguages l ON v.ScriptLanguageId=l.Id\nLEFT OUTER JOIN ScriptVotes vo ON vo.ScriptVersionId=v.Id\nWHERE r.WorkerStatus != 4\n AND r.WorkerStatus != 5\nGROUP BY s.Id,\n cv.Title,\n cv.Id,\n l.Name\nORDER BY cv.Id DESC\n', con)
scripts
code
306027/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split # sklearn.cross_validation was removed; train_test_split now lives in sklearn.model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
import pandas as pd
import sqlite3
import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
scripts = pd.read_sql_query('\n\nSELECT s.Id,\n\n cv.Title,\n\n COUNT(DISTINCT vo.Id) NumVotes,\n\n COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END) NumNonSelfVotes,\n\n CASE WHEN COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END)>0 THEN 1 ELSE 0 END HasNonSelfVotes,\n\n COUNT(DISTINCT v.Id) NumVersions,\n\n SUM(CASE WHEN r.WorkerStatus=2 THEN 1 ELSE 0 END) NumSuccessfulRuns,\n\n SUM(CASE WHEN r.WorkerStatus=3 THEN 1 ELSE 0 END) NumErroredRuns,\n\n SUM(CASE WHEN v.IsChange=1 THEN 1 ELSE 0 END) NumChangedVersions,\n\n SUM(v.LinesInsertedFromPrevious-v.LinesDeletedFromPrevious) Lines,\n\n SUM(v.LinesInsertedFromPrevious+v.LinesChangedFromPrevious) LinesAddedOrChanged,\n\n l.Name\n\nFROM Scripts s\n\nINNER JOIN ScriptVersions v ON v.ScriptId=s.Id\n\nINNER JOIN ScriptVersions cv ON s.CurrentScriptVersionId=cv.Id\n\nINNER JOIN ScriptRuns r ON r.ScriptVersionId=v.Id\n\nINNER JOIN ScriptLanguages l ON v.ScriptLanguageId=l.Id\n\nLEFT OUTER JOIN ScriptVotes vo ON vo.ScriptVersionId=v.Id\n\nWHERE r.WorkerStatus != 4\n\n AND r.WorkerStatus != 5\n\nGROUP BY s.Id,\n\n cv.Title,\n\n cv.Id,\n\n l.Name\n\nORDER BY cv.Id DESC\n\n', con)
scripts
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
class RawColumnExtractor:
    def __init__(self, column):
        self.column = column
    def fit(self, *_):
        return self
    def transform(self, data):
        return data[[self.column]]
features = FeatureUnion([('NumSuccessfulRuns', RawColumnExtractor('NumSuccessfulRuns')), ('NumChangedVersions', RawColumnExtractor('NumChangedVersions'))])
pipeline = Pipeline([('feature_union', features), ('predictor', RandomForestClassifier())])
train = scripts
target_name = 'HasNonSelfVotes'
x_train, x_test, y_train, y_test = train_test_split(train, train[target_name], test_size=0.4, random_state=0)
pipeline.fit(x_train, y_train)
score = pipeline.score(x_test, y_test)
print('Score %f' % score)
code
90133854/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
    print(f'{i},"coulmn_value_count"' + f'num_of_coulmn=\t{x}' + f'\tnum_of_items_in_each_coulmn\t{len(ge[i].value_counts())})')
    print(ge[i].value_counts().to_frame)
    x = x + 1
    print('*******************')
code
90133854/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
    x = x + 1
ge.describe().round(2).T
ge.isnull().sum()
ge1 = ge.copy()
cat_ge = list(ge.select_dtypes(exclude='float64').columns)
num_ge = list(ge.select_dtypes(include='float64').columns)
i = ['gender']
num_ge = num_ge + i
col = ge1.columns
fgi, ax = plt.subplots(len(num_ge), 1, figsize=(15, 15))
for ind, axi in enumerate(ax.flat):
    axi.boxplot(ge1[num_ge[ind]], vert=False)
    axi.set_title(num_ge[ind], size=12)
code
90133854/cell_6
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
ge = pd.read_csv('../input/gender-classification-dataset/gender_classification_v7.csv')
ge
x = 0
for i in ge.columns:
    x = x + 1
ge.describe().round(2).T
ge.info()
code