path stringlengths 13-17 | screenshot_names sequencelengths 1-873 | code stringlengths 0-40.4k | cell_type stringclasses 1 value |
---|---|---|---|
106196484/cell_16 | [
"text_plain_output_1.png"
] | from flashtext import KeywordProcessor
from nltk.tokenize import sent_tokenize
import pke
import re
import re
ml_ftxt = re.sub('\\n', ' ', ml_ftxt)
ml_ftxt = ml_ftxt.translate(str.maketrans(' ', ' ', '!"#$%&\'()*+-/:;<=>?@[\\]^_`{|}~'))
ml_ftxt = re.sub('[A-Za-z0-9]*@[A-Za-z]*\\.?[A-Za-z0-9]*', '', ml_ftxt)
extractor = pke.unsupervised.TextRank()
extractor.load_document(input=ml_ftxt)
extractor.candidate_selection()
extractor.candidate_weighting()
keyphrases2 = extractor.get_n_best(n=9)
TextRank = []
for i, j in keyphrases2:
    TextRank.append(i)
from nltk.tokenize import sent_tokenize
from flashtext import KeywordProcessor
def tokenize_sentences(text):
    sentences = [sent_tokenize(text)]
    sentences = [y for x in sentences for y in x]
    sentences = [sentence.strip() for sentence in sentences if len(sentence) > 20]
    return sentences
def get_sentences_for_keyword(keywords, sentences):
    keyword_processor = KeywordProcessor()
    keyword_sentences = {}
    for word in keywords:
        keyword_sentences[word] = []
        keyword_processor.add_keyword(word)
    for sentence in sentences:
        keywords_found = keyword_processor.extract_keywords(sentence)
        for key in keywords_found:
            keyword_sentences[key].append(sentence)
    for key in keyword_sentences.keys():
        values = keyword_sentences[key]
        values = sorted(values, key=len, reverse=True)
        keyword_sentences[key] = values
    return keyword_sentences
sentences = tokenize_sentences(ml_ftxt)
keyword_sentence_mapping = get_sentences_for_keyword(TextRank, sentences)
sentences = []
for i, j in keyword_sentence_mapping.items():
    sentences.append(j[0])
for i in sentences:
    print(nlp(i)) | code |
106196484/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | ml_ftxt[:3000] | code |
106196484/cell_14 | [
"text_plain_output_1.png"
] | !pip install --upgrade pip
!pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab] | code |
106196484/cell_10 | [
"text_plain_output_1.png"
] | import pke
import re
import re
ml_ftxt = re.sub('\\n', ' ', ml_ftxt)
ml_ftxt = ml_ftxt.translate(str.maketrans(' ', ' ', '!"#$%&\'()*+-/:;<=>?@[\\]^_`{|}~'))
ml_ftxt = re.sub('[A-Za-z0-9]*@[A-Za-z]*\\.?[A-Za-z0-9]*', '', ml_ftxt)
extractor = pke.unsupervised.TextRank()
extractor.load_document(input=ml_ftxt)
extractor.candidate_selection()
extractor.candidate_weighting()
keyphrases2 = extractor.get_n_best(n=9)
TextRank = []
for i, j in keyphrases2:
    TextRank.append(i)
TextRank | code |
106196484/cell_12 | [
"text_plain_output_1.png"
] | !pip install -U transformers==3.0.0
!python -m nltk.downloader punkt
!git clone https://github.com/patil-suraj/question_generation.git | code |
106196484/cell_5 | [
"text_plain_output_1.png"
] | import re
import re
ml_ftxt = re.sub('\\n', ' ', ml_ftxt)
ml_ftxt = ml_ftxt.translate(str.maketrans(' ', ' ', '!"#$%&\'()*+-/:;<=>?@[\\]^_`{|}~'))
ml_ftxt = re.sub('[A-Za-z0-9]*@[A-Za-z]*\\.?[A-Za-z0-9]*', '', ml_ftxt)
ml_ftxt[:3000] | code |
88098897/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('iris.csv')
df | code |
129020311/cell_25 | [
"image_output_5.png",
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
data.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
data.head() | code |
129020311/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
print(train.shape)
print(test.shape) | code |
129020311/cell_33 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
data.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
cat_cols = [feature for feature in data.columns if data[feature].dtypes == 'O']
cat_cols.extend(['SibSp', 'Pclass'])
cat_cols | code |
129020311/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
data.head() | code |
129020311/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.head() | code |
129020311/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
num_cols = num_cols[1:6]
num_cols
data.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
cat_cols = [feature for feature in data.columns if data[feature].dtypes == 'O']
num_cols | code |
129020311/cell_26 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
data.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
data.info() | code |
129020311/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df | code |
129020311/cell_1 | [
"text_plain_output_1.png"
] | import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
129020311/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
data.info() | code |
129020311/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
num_cols = num_cols[1:6]
num_cols
def plt_num_cols(data, num_var):
    for feature in num_var:
        f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.15, 0.85)})
        f.set_figheight(3)
        f.set_figwidth(15)
        sns.boxplot(x=feature, data=data, ax=ax_box, orient='h')
        sns.histplot(data=data, x=feature, ax=ax_hist)
        plt.show()
plt_num_cols(data, num_cols) | code |
129020311/cell_32 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
num_cols = num_cols[1:6]
num_cols
data.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
cat_cols = [feature for feature in data.columns if data[feature].dtypes == 'O']
del num_cols[0]
num_cols | code |
129020311/cell_15 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols | code |
129020311/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
num_cols = num_cols[1:6]
num_cols | code |
129020311/cell_17 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
sns.boxplot(x='Age', data=data) | code |
129020311/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
len(data['Ticket'].unique()) | code |
129020311/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
num_cols = [feature for feature in data.columns if data[feature].dtypes != 'O']
num_cols
data['Survived'].hist() | code |
129020311/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape
data.set_index('PassengerId', inplace=True)
data.drop('Name', axis=1, inplace=True)
def find_missing_df(data):
    """
    Create a dataframe showing which columns are missing values, and the amount and percent of missing values
    """
    missing_df = pd.DataFrame(columns=['Column_name', 'Total_missing_values', 'Percent_missing', 'data_types'])
    col_name_arr = []
    missing_value_arr = []
    percent_missing_arr = []
    dtypes_arr = []
    for col in data:
        if data[col].isna().sum() > 0:
            percent_missing = np.float(f'{data[col].isna().sum() / data.shape[0] * 100:.2f}')
            col_name_arr.append(col)
            dtypes_arr.append(data[col].dtypes)
            missing_value_arr.append(data[col].isna().sum())
            percent_missing_arr.append(percent_missing)
    missing_df['Column_name'] = col_name_arr
    missing_df['Total_missing_values'] = missing_value_arr
    missing_df['Percent_missing'] = percent_missing_arr
    missing_df['data_types'] = dtypes_arr
    return missing_df
missing_df = find_missing_df(data)
missing_df
missing_cols = missing_df.drop(0).Column_name.values
missing_cols | code |
129020311/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
data = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
data.shape | code |
1004133/cell_4 | [
"image_output_11.png",
"image_output_24.png",
"image_output_25.png",
"text_plain_output_5.png",
"text_plain_output_15.png",
"image_output_17.png",
"text_plain_output_9.png",
"image_output_14.png",
"image_output_28.png",
"text_plain_output_20.png",
"image_output_23.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"image_output_13.png",
"image_output_5.png",
"text_plain_output_14.png",
"image_output_18.png",
"image_output_21.png",
"text_plain_output_27.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_25.png",
"image_output_20.png",
"text_plain_output_18.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_22.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_16.png",
"image_output_16.png",
"text_plain_output_8.png",
"text_plain_output_26.png",
"image_output_27.png",
"image_output_6.png",
"text_plain_output_23.png",
"image_output_12.png",
"text_plain_output_28.png",
"image_output_22.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png",
"image_output_26.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.preprocessing as pre
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
images = train_data.drop('label', 1)
observations, features = images.shape
pixel_width = int(np.sqrt(features))
X = images.as_matrix()
X_train = X.reshape(observations, pixel_width, pixel_width, 1)
labels = train_data['label']
Y = labels.as_matrix()
labels = pre.LabelEncoder().fit_transform(labels)[:, None]
Y_train = pre.OneHotEncoder().fit_transform(labels).todense()
t = test_data.as_matrix()
tr, tc = t.shape
test = t.reshape(tr, pixel_width, pixel_width, 1)
def showImage(X, index):
    N, w, h, c = X.shape
    grid = np.zeros((w, h))
    for i in range(w):
        for j in range(h):
            grid[i, j] = X[index, i, j, 0]
    plt.rcParams['figure.figsize'] = [1.5, 1.5]
    plt.imshow(grid, cmap='gray')
    plt.ion()
    plt.show()
showImage(X_train, 3)
showImage(X_train, 875)
showImage(X_train, 40000) | code |
1004133/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import sklearn.preprocessing as pre
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
images = train_data.drop('label', 1)
observations, features = images.shape
pixel_width = int(np.sqrt(features))
X = images.as_matrix()
X_train = X.reshape(observations, pixel_width, pixel_width, 1)
print('Image Array', X_train.shape)
labels = train_data['label']
Y = labels.as_matrix()
labels = pre.LabelEncoder().fit_transform(labels)[:, None]
Y_train = pre.OneHotEncoder().fit_transform(labels).todense()
print('Label Array', Y_train.shape)
t = test_data.as_matrix()
tr, tc = t.shape
test = t.reshape(tr, pixel_width, pixel_width, 1)
print('Image Array', test.shape) | code |
1004133/cell_10 | [
"text_plain_output_1.png"
] | import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.preprocessing as pre
import tensorflow as tf
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
images = train_data.drop('label', 1)
observations, features = images.shape
pixel_width = int(np.sqrt(features))
X = images.as_matrix()
X_train = X.reshape(observations, pixel_width, pixel_width, 1)
labels = train_data['label']
Y = labels.as_matrix()
labels = pre.LabelEncoder().fit_transform(labels)[:, None]
Y_train = pre.OneHotEncoder().fit_transform(labels).todense()
t = test_data.as_matrix()
tr, tc = t.shape
test = t.reshape(tr, pixel_width, pixel_width, 1)
def showImage(X, index):
    N, w, h, c = X.shape
    grid = np.zeros((w, h))
    for i in range(w):
        for j in range(h):
            grid[i, j] = X[index, i, j, 0]
    plt.rcParams['figure.figsize'] = [1.5, 1.5]
    plt.ion()
X = tf.placeholder('float', [None, 28, 28, 1])
Y = tf.placeholder('float', [None, 10])
lr = tf.placeholder(tf.float32)
pkeep = tf.placeholder(tf.float32)
pkeep_conv = tf.placeholder(tf.float32)
K = 24
L = 48
M = 64
N = 200
W1 = tf.Variable(tf.truncated_normal([6, 6, 1, K], stddev=0.1))
B1 = tf.Variable(tf.ones([K]) / 10)
W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))
B2 = tf.Variable(tf.ones([L]) / 10)
W3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))
B3 = tf.Variable(tf.ones([M]) / 10)
W4 = tf.Variable(tf.truncated_normal([7 * 7 * M, N], stddev=0.1))
B4 = tf.Variable(tf.ones([N]) / 10)
W5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))
B5 = tf.Variable(tf.ones([10]) / 10)
def batchnorm(Ylogits, beta, convolutional=False):
    bnepsilon = 1e-05
    if convolutional:
        mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])
    else:
        mean, variance = tf.nn.moments(Ylogits, [0])
    BN = tf.nn.batch_normalization(Ylogits, mean, variance, beta, None, bnepsilon)
    return BN
stride = 1
Y1 = tf.nn.conv2d(X, W1, strides=[1, stride, stride, 1], padding='SAME')
BN1 = batchnorm(Y1, B1, convolutional=True)
Y1_BN = tf.nn.relu(BN1)
stride = 2
Y2 = tf.nn.conv2d(Y1_BN, W2, strides=[1, stride, stride, 1], padding='SAME')
BN2 = batchnorm(Y2, B2, convolutional=True)
Y2_BN = tf.nn.relu(BN2)
stride = 2
Y3 = tf.nn.conv2d(Y2_BN, W3, strides=[1, stride, stride, 1], padding='SAME')
BN3 = batchnorm(Y3, B3, convolutional=True)
Y3_BN = tf.nn.relu(BN3)
YY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])
Y4 = tf.matmul(YY, W4)
BN4 = batchnorm(Y4, B4)
Y4_BN = tf.nn.relu(BN4)
YY4 = tf.nn.dropout(Y4_BN, pkeep)
Ylogits = tf.matmul(YY4, W5) + B5
YHat = tf.nn.softmax(Ylogits)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y)
cross_entropy = tf.reduce_mean(cross_entropy)
correct_prediction = tf.equal(tf.argmax(YHat, 1), tf.argmax(Y_train, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
predict = tf.argmax(YHat, 1)
training_epochs = 2
batch_size = 100
max_learning_rate = 0.02
min_learning_rate = 0.0001
decay_speed = 1600.0
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(observations / batch_size)
    batch_no = 1
    print('Optimization In Progress')
    for epoch in range(training_epochs):
        c = 0.0
        avg_cost = 0.0
        for i in range(total_batch):
            start = i * 100
            end = start + batch_size - 1
            learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)
            batch_X = X_train[start:end]
            batch_Y = Y_train[start:end]
            _, c = sess.run([optimizer, cross_entropy], feed_dict={X: batch_X, Y: batch_Y, lr: learning_rate, pkeep: 0.75})
            avg_cost += c / total_batch
        print('epoch No {} cross entropy={}'.format(epoch + 1, avg_cost))
    print('Optimization Completed')
    print('Predictions')
    test_batch = batch_size
    predictions = np.zeros(test.shape[0])
    for i in range(0, test.shape[0] // test_batch):
        predictions[i * test_batch:(i + 1) * test_batch] = predict.eval(feed_dict={X: test[i * test_batch:(i + 1) * test_batch], pkeep: 1.0}) | code |
72108430/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
iris.info() | code |
72108430/cell_30 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
train_X.head(5) | code |
72108430/cell_33 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn import svm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
model = svm.SVC()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
print('The accuracy of the SVM is:', metrics.accuracy_score(prediction, test_y)) | code |
72108430/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6)) | code |
72108430/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
iris.describe() | code |
72108430/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
plt.figure(figsize=(7, 4))
sns.heatmap(iris.drop('Id', axis=1).corr(), annot=True, cmap='cubehelix_r')
plt.show() | code |
72108430/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
iris.head() | code |
72108430/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
sns.pairplot(iris.drop('Id', axis=1), hue='Species', height=3) | code |
72108430/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
train, test = train_test_split(iris, test_size=0.25)
print(train.shape)
print(test.shape) | code |
72108430/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
iris['Species'].value_counts() | code |
72108430/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
sns.boxplot(x='Species', y='PetalLengthCm', data=iris) | code |
72108430/cell_35 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
model = svm.SVC()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
model = LogisticRegression()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
print('The accuracy of the Logistic Regression is', metrics.accuracy_score(prediction, test_y)) | code |
72108430/cell_31 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
test_y.head(5) | code |
72108430/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title(' Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show() | code |
72108430/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show() | code |
72108430/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
iris.plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm') | code |
72108430/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend() | code |
16132902/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)
def descibe_df(bkshare_df1):
    pass
def bkshare_corr(bkshare_df1):
    pass
remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1)
plt.figure(figsize=(10, 5))
ax = sns.heatmap(bkshare_df1.corr(), annot=True)
plt.show(ax) | code |
16132902/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)
def descibe_df(bkshare_df1):
    pass
def bkshare_corr(bkshare_df1):
    pass
remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1)
plt.figure(figsize=(10,5))
ax = sns.heatmap(bkshare_df1.corr(), annot=True)
plt.show(ax)
X = bkshare_df1.drop(['count', 'casual', 'registered'], axis=1)
Y = bkshare_df1['count']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=12)
model = LinearRegression()
model.fit(X_train, Y_train)
print('Coef & Intercept:', model.coef_, model.intercept_) | code |
16132902/cell_2 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
bkshare_df1.info() | code |
16132902/cell_1 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
print(os.listdir('../input'))
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
bkshare_df1.head() | code |
16132902/cell_7 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)
def descibe_df(bkshare_df1):
    pass
def bkshare_corr(bkshare_df1):
    pass
remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1)
plt.figure(figsize=(10,5))
ax = sns.heatmap(bkshare_df1.corr(), annot=True)
plt.show(ax)
X = bkshare_df1.drop(['count', 'casual', 'registered'], axis=1)
Y = bkshare_df1['count']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=12)
model = LinearRegression()
model.fit(X_train, Y_train)
Y_train_predict = model.predict(X_train)
display(Y_train_predict.shape)
Y_test_predict = model.predict(X_test)
display(Y_test_predict.shape)
print('Train MSE:', mean_squared_error(Y_train, Y_train_predict))
print('Test MSE:', mean_squared_error(Y_test, Y_test_predict)) | code |
16132902/cell_3 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)
def descibe_df(bkshare_df1):
    print('Describing Dataset')
    print('------------------')
    display(bkshare_df1.describe())
def bkshare_corr(bkshare_df1):
    print('Correlation')
    print('------------')
    display(bkshare_df1.corr())
remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1) | code |
16132902/cell_5 | [
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import os
bkshare_df = pd.read_csv('../input/bike_share.csv')
bkshare_df1 = bkshare_df.copy()
def remove_duplicates(bkshare_df1):
    bkshare_df1.drop_duplicates(inplace=True)
def descibe_df(bkshare_df1):
    pass
def bkshare_corr(bkshare_df1):
    pass
remove_duplicates(bkshare_df1)
descibe_df(bkshare_df1)
bkshare_corr(bkshare_df1)
plt.figure(figsize=(10,5))
ax = sns.heatmap(bkshare_df1.corr(), annot=True)
plt.show(ax)
X = bkshare_df1.drop(['count', 'casual', 'registered'], axis=1)
Y = bkshare_df1['count']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=12)
display(X_train.shape)
display(X_test.shape)
display(Y_train.shape)
display(Y_test.shape) | code |
129006129/cell_1 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_1.png"
] | !pip install transformers >/dev/null
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from tqdm import tqdm
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import pandas as pd
import numpy as np
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert = BertModel.from_pretrained("bert-base-uncased").to(device)
print("torch.cuda.is_available:",torch.cuda.is_available()) | code |
129006129/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import pandas as pd
import torch
test_data = pd.read_pickle('/kaggle/input/nlp-with-disaster-tweets-eda-cleaning-and-bert/test.pkl')
test_text = test_data.text_cleaned.apply(lambda x: x.lower()).values.tolist()
bert.eval()
testtext_embedding = []
with torch.no_grad():
    for t in tqdm(test_text):
        t = tokenizer(t, return_tensors='pt').to(device)
        output = bert(**t).pooler_output.to('cpu').numpy()
        testtext_embedding.append(output)
test_data = pd.read_pickle('/kaggle/input/nlp-with-disaster-tweets-eda-cleaning-and-bert/test.pkl')
test_data.to_csv('preprocessed_test.csv')
train_data = pd.read_pickle('/kaggle/input/nlp-with-disaster-tweets-eda-cleaning-and-bert/train.pkl')
train_text = train_data.text_cleaned.apply(lambda x: x.lower()).values
bert.eval()
traintext_embedding = []
with torch.no_grad():
    for t in tqdm(train_text):
        t = tokenizer(t, return_tensors='pt').to(device)
        output = bert(**t).pooler_output.to('cpu').numpy()
        traintext_embedding.append(output) | code |
129006129/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import pandas as pd
import torch
test_data = pd.read_pickle('/kaggle/input/nlp-with-disaster-tweets-eda-cleaning-and-bert/test.pkl')
test_text = test_data.text_cleaned.apply(lambda x: x.lower()).values.tolist()
bert.eval()
testtext_embedding = []
with torch.no_grad():
for t in tqdm(test_text):
t = tokenizer(t, return_tensors='pt').to(device)
output = bert(**t).pooler_output.to('cpu').numpy()
testtext_embedding.append(output) | code |
16119472/cell_13 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
test_df.take(5) | code |
16119472/cell_30 | [
"text_plain_output_1.png"
] | from nltk import pos_tag
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.stem import WordNetLemmatizer
from pyspark.sql import SparkSession
import nltk
import string
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
from nltk import pos_tag
from nltk.corpus import wordnet
def get_wordnet_pos(pos_tag):
if pos_tag.startswith('J'):
return wordnet.ADJ
elif pos_tag.startswith('V'):
return wordnet.VERB
elif pos_tag.startswith('N'):
return wordnet.NOUN
elif pos_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
stop = stopwords.words('english')
text = [x for x in text if x not in stop]
text = [t for t in text if len(t) > 0]
pos_tags = pos_tag(text)
text = [WordNetLemmatizer().lemmatize(t[0], get_wordnet_pos(t[1])) for t in pos_tags]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5)
all_rdd = all_rdd.filter(lambda x: x is not None).filter(lambda x: x != '')
all_rdd = all_rdd.map(lambda x: x.lower())
all_rdd.take(5)
all_rdd = all_rdd.map(lambda x: [word.strip(string.punctuation) for word in x.split(' ')])
all_rdd = all_rdd.map(lambda text: [word for word in text if not any((c.isdigit() for c in word))])
all_rdd.take(5)
stop = stopwords.words('english')
remove_stop = lambda text: [x for x in text if x not in stop]
all_rdd = all_rdd.map(remove_stop)
all_rdd.take(5)
remove_empty = lambda text: [t for t in text if len(t) > 0]
all_rdd = all_rdd.map(remove_empty)
all_rdd.take(5)
import nltk
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def lemma(x):
return lemmatizer.lemmatize(x)
lemmatize = lambda x: [lemma(i) for i in x]
all_rdd = all_rdd.map(lemmatize) | code |
16119472/cell_6 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5) | code |
16119472/cell_29 | [
"text_plain_output_1.png"
] | from nltk import pos_tag
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from pyspark.sql import SparkSession
import string
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
from nltk import pos_tag
from nltk.corpus import wordnet
def get_wordnet_pos(pos_tag):
if pos_tag.startswith('J'):
return wordnet.ADJ
elif pos_tag.startswith('V'):
return wordnet.VERB
elif pos_tag.startswith('N'):
return wordnet.NOUN
elif pos_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
stop = stopwords.words('english')
text = [x for x in text if x not in stop]
text = [t for t in text if len(t) > 0]
pos_tags = pos_tag(text)
text = [WordNetLemmatizer().lemmatize(t[0], get_wordnet_pos(t[1])) for t in pos_tags]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5)
all_rdd = all_rdd.filter(lambda x: x is not None).filter(lambda x: x != '')
all_rdd = all_rdd.map(lambda x: x.lower())
all_rdd.take(5)
all_rdd = all_rdd.map(lambda x: [word.strip(string.punctuation) for word in x.split(' ')])
all_rdd = all_rdd.map(lambda text: [word for word in text if not any((c.isdigit() for c in word))])
all_rdd.take(5)
stop = stopwords.words('english')
remove_stop = lambda text: [x for x in text if x not in stop]
all_rdd = all_rdd.map(remove_stop)
all_rdd.take(5)
remove_empty = lambda text: [t for t in text if len(t) > 0]
all_rdd = all_rdd.map(remove_empty)
all_rdd.take(5) | code |
16119472/cell_26 | [
"text_plain_output_1.png"
] | from nltk import pos_tag
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from pyspark.sql import SparkSession
import string
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
from nltk import pos_tag
from nltk.corpus import wordnet
def get_wordnet_pos(pos_tag):
if pos_tag.startswith('J'):
return wordnet.ADJ
elif pos_tag.startswith('V'):
return wordnet.VERB
elif pos_tag.startswith('N'):
return wordnet.NOUN
elif pos_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
stop = stopwords.words('english')
text = [x for x in text if x not in stop]
text = [t for t in text if len(t) > 0]
pos_tags = pos_tag(text)
text = [WordNetLemmatizer().lemmatize(t[0], get_wordnet_pos(t[1])) for t in pos_tags]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5)
all_rdd = all_rdd.filter(lambda x: x is not None).filter(lambda x: x != '')
all_rdd = all_rdd.map(lambda x: x.lower())
all_rdd.take(5)
all_rdd = all_rdd.map(lambda x: [word.strip(string.punctuation) for word in x.split(' ')])
all_rdd = all_rdd.map(lambda text: [word for word in text if not any((c.isdigit() for c in word))])
all_rdd.take(5) | code |
16119472/cell_2 | [
"text_plain_output_1.png"
] | pip install pyspark | code |
16119472/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16119472/cell_7 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns | code |
16119472/cell_18 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5) | code |
16119472/cell_28 | [
"text_plain_output_1.png"
] | from nltk import pos_tag
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from pyspark.sql import SparkSession
import string
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
from nltk import pos_tag
from nltk.corpus import wordnet
def get_wordnet_pos(pos_tag):
if pos_tag.startswith('J'):
return wordnet.ADJ
elif pos_tag.startswith('V'):
return wordnet.VERB
elif pos_tag.startswith('N'):
return wordnet.NOUN
elif pos_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
stop = stopwords.words('english')
text = [x for x in text if x not in stop]
text = [t for t in text if len(t) > 0]
pos_tags = pos_tag(text)
text = [WordNetLemmatizer().lemmatize(t[0], get_wordnet_pos(t[1])) for t in pos_tags]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5)
all_rdd = all_rdd.filter(lambda x: x is not None).filter(lambda x: x != '')
all_rdd = all_rdd.map(lambda x: x.lower())
all_rdd.take(5)
all_rdd = all_rdd.map(lambda x: [word.strip(string.punctuation) for word in x.split(' ')])
all_rdd = all_rdd.map(lambda text: [word for word in text if not any((c.isdigit() for c in word))])
all_rdd.take(5)
stop = stopwords.words('english')
remove_stop = lambda text: [x for x in text if x not in stop]
all_rdd = all_rdd.map(remove_stop)
all_rdd.take(5) | code |
16119472/cell_15 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5) | code |
16119472/cell_31 | [
"text_plain_output_1.png"
] | from nltk import pos_tag
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.stem import WordNetLemmatizer
from pyspark.sql import SparkSession
import nltk
import string
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
from nltk import pos_tag
from nltk.corpus import wordnet
def get_wordnet_pos(pos_tag):
if pos_tag.startswith('J'):
return wordnet.ADJ
elif pos_tag.startswith('V'):
return wordnet.VERB
elif pos_tag.startswith('N'):
return wordnet.NOUN
elif pos_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
import string
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
def clean_text(text):
text = text.lower()
text = [word.strip(string.punctuation) for word in text.split(' ')]
text = [word for word in text if not any((c.isdigit() for c in word))]
stop = stopwords.words('english')
text = [x for x in text if x not in stop]
text = [t for t in text if len(t) > 0]
pos_tags = pos_tag(text)
text = [WordNetLemmatizer().lemmatize(t[0], get_wordnet_pos(t[1])) for t in pos_tags]
text = [t for t in text if len(t) > 1]
text = ' '.join(text)
return text
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5)
all_rdd = all_rdd.filter(lambda x: x is not None).filter(lambda x: x != '')
all_rdd = all_rdd.map(lambda x: x.lower())
all_rdd.take(5)
all_rdd = all_rdd.map(lambda x: [word.strip(string.punctuation) for word in x.split(' ')])
all_rdd = all_rdd.map(lambda text: [word for word in text if not any((c.isdigit() for c in word))])
all_rdd.take(5)
stop = stopwords.words('english')
remove_stop = lambda text: [x for x in text if x not in stop]
all_rdd = all_rdd.map(remove_stop)
all_rdd.take(5)
remove_empty = lambda text: [t for t in text if len(t) > 0]
all_rdd = all_rdd.map(remove_empty)
all_rdd.take(5)
import nltk
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def lemma(x):
return lemmatizer.lemmatize(x)
lemmatize = lambda x: [lemma(i) for i in x]
all_rdd = all_rdd.map(lemmatize)
all_rdd.take(5) | code |
16119472/cell_24 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5)
test_df.take(5)
all_data = train_df.union(test_df)
all_data.take(5)
all_rdd = all_data.select('comment_text').rdd.flatMap(lambda x: x)
all_rdd.take(5)
all_rdd = all_rdd.filter(lambda x: x is not None).filter(lambda x: x != '')
all_rdd = all_rdd.map(lambda x: x.lower())
all_rdd.take(5) | code |
16119472/cell_10 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
out_cols | code |
16119472/cell_12 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('TestCSV').getOrCreate()
train_df = spark.read.csv('../input/train.csv', header=True)
test_df = spark.read.csv('../input/test.csv', header=True)
train_df.take(5)
train_df.columns
out_cols = [i for i in train_df.columns if i not in ['id', 'comment_text']]
train_df = train_df.drop(*out_cols)
train_df.take(5) | code |
34120998/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | !ls ../train_overlay/ | code |
34120998/cell_8 | [
"image_output_1.png"
] | from IPython.display import Image, display
from PIL import Image
display(Image(filename='../train_overlay/3046035f348012fdba6f7c53c4faa16e.png')) | code |
34120998/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/prostate-cancer-grade-assessment'
train = pd.read_csv(f'{path}/train.csv')
test = pd.read_csv(f'{path}/test.csv')
submission = pd.read_csv(f'{path}/sample_submission.csv')
suspicious = pd.read_csv(f'../input/suspicious-data-panda/suspicious_test_cases.csv')
data_dir = f'{path}/train_images'
mask_dir = f'{path}/train_label_masks'
df_train = train.copy().set_index('image_id')
for j in df_train.index:
for i in suspicious['image_id']:
if i == j:
df_train.drop([i], axis=0, inplace=True)
df_train | code |
90147986/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # Data processing, CSV file I/O (e.g. pd.read_csv)
krenth311 = pd.read_csv('../input/dataset/krenth311.csv')
krenth316 = pd.read_csv('../input/dataset/krenth316.csv')
merge = pd.concat([krenth311, krenth316])
merge.to_csv('merge.csv', index=False)
for col in ['aloneorinagroup']:
krenth311[col].value_counts(ascending=True).plot(kind='barh', title=col)
plt.xlabel('frequency')
plt.show() | code |
90147986/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import cufflinks as cf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import dates as md
import seaborn as sns
import plotly.graph_objs as go
import plotly
import cufflinks as cf
cf.set_config_file(offline=True)
import os | code |
90147986/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # Data processing, CSV file I/O (e.g. pd.read_csv)
krenth311 = pd.read_csv('../input/dataset/krenth311.csv')
krenth316 = pd.read_csv('../input/dataset/krenth316.csv')
merge = pd.concat([krenth311, krenth316])
merge.to_csv('merge.csv', index=False)
for i in heartrate:
i += i
heartrate = sum(heartx)
krenth311['heartrate'] = krenth311[sum(heartrate) / len(heartrate)]
krenth311['aloneorinagroup'] = krenth311['aloneorinagroup'].replace({9: 'Alone', 10: 'Online', 11: 'Group'}) | code |
128006303/cell_6 | [
"text_html_output_1.png"
] | model1 = LinearRegression()
model1.fit(total_X, total_y)
model2 = LinearRegression()
model2.fit(men_X, men_y)
model3 = LinearRegression()
model3.fit(women_X, women_y) | code |
128006303/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error | code |
128006303/cell_8 | [
"text_plain_output_1.png"
] | model1 = LinearRegression()
model1.fit(total_X, total_y)
model2 = LinearRegression()
model2.fit(men_X, men_y)
model3 = LinearRegression()
model3.fit(women_X, women_y)
pred_total = model1.predict(total_X)
pred_men = model2.predict(men_X)
pred_women = model3.predict(women_X)
"""
MSE_total:16949.508877183063
MSE_men:10974.075600749911
MSE_women:3972.9602897642253
"""
mse_total = mean_squared_error(pred_total, total_y)
print(f'MSE_total:{mse_total}')
mse_men = mean_squared_error(pred_men, men_y)
print(f'MSE_men:{mse_men}')
mse_women = mean_squared_error(pred_women, women_y)
print(f'MSE_women:{mse_women}') | code |
128006303/cell_3 | [
"text_html_output_1.png"
] | train = pd.read_csv('/kaggle/input/population-projections/train.csv')
train.head() | code |
89137453/cell_21 | [
"text_plain_output_1.png"
] | from ipywidgets import interact, widgets
from tensorflow import keras
import math
import matplotlib.pyplot as plt
import numpy as np
fashion_mnist = keras.datasets.fashion_mnist
(in_train, out_train), (in_valid, out_valid) = fashion_mnist.load_data()
(in_train.shape, in_valid.shape, np.unique(out_train))
in_train = in_train / 255.0
in_valid = in_valid / 255.0
class_names = {index: cn for index, cn in enumerate(['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'])}
# row_number = int(input('How many rows of training images would you like to review?'))
row_number = 5
def plot_train(n_rows = 2, predictions=None):
'''create a grid with 10 columns
'''
n_cols = 10
# n_rows = math.ceil(len(images) / n_cols)
images = in_train[: n_cols * n_rows]
labels = out_train[: n_cols * n_rows]
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
# plot first 20 images
plot_train(row_number)
#input('How many rows of training images would you like to review?')
def plot(images, labels, predictions=None):
'''create a grid with 10 columns
'''
n_cols = min(10, len(images))
n_rows = math.ceil(len(images) / n_cols)
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
# plot first 20 images
#plot(in_train[:20], out_train[:20])
def plot(images, labels, predictions=None):
# create a grid with 10 columns
n_cols = min(10, len(images))
n_rows = math.ceil(len(images) / n_cols)
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
# plot first 20 images
#plot(in_train[:20], out_train[:20])
model = keras.Sequential(layers=[keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(in_train, out_train, batch_size=60, epochs=10, validation_split=0.2)
loss, accuracy = model.evaluate(in_valid, out_valid)
probs = model.predict(in_valid)
preds = model.predict(in_valid).argsort()[:, -1]
from ipywidgets import interact, widgets
img_idx_slider = widgets.IntSlider(value=0, min=0, max=len(in_valid) - 1, description='Image index')
@interact(index=img_idx_slider)
def visualize_prediction(index=0):
fix, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(in_valid[index], cmap=plt.cm.binary)
ax1.set_title('label: %s' % class_names[out_valid[index]])
ax1.set_xlabel('predict: %s' % class_names[preds[index]])
ax2.bar(x=[class_names[index] for index in range(10)], height=probs[index] * 100)
plt.xticks(rotation=90) | code |
89137453/cell_4 | [
"text_plain_output_1.png"
] | from tensorflow import keras
fashion_mnist = keras.datasets.fashion_mnist
(in_train, out_train), (in_valid, out_valid) = fashion_mnist.load_data() | code |
89137453/cell_6 | [
"image_png_output_1.png"
] | out_train | code |
89137453/cell_19 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from tensorflow import keras
import math
import matplotlib.pyplot as plt
import numpy as np
fashion_mnist = keras.datasets.fashion_mnist
(in_train, out_train), (in_valid, out_valid) = fashion_mnist.load_data()
(in_train.shape, in_valid.shape, np.unique(out_train))
in_train = in_train / 255.0
in_valid = in_valid / 255.0
class_names = {index: cn for index, cn in enumerate(['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'])}
# row_number = int(input('How many rows of training images would you like to review?'))
row_number = 5
def plot_train(n_rows = 2, predictions=None):
'''create a grid with 10 columns
'''
n_cols = 10
# n_rows = math.ceil(len(images) / n_cols)
images = in_train[: n_cols * n_rows]
labels = out_train[: n_cols * n_rows]
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
# plot first 20 images
plot_train(row_number)
#input('How many rows of training images would you like to review?')
def plot(images, labels, predictions=None):
'''create a grid with 10 columns
'''
n_cols = min(10, len(images))
n_rows = math.ceil(len(images) / n_cols)
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
# plot first 20 images
#plot(in_train[:20], out_train[:20])
def plot(images, labels, predictions=None):
# create a grid with 10 columns
n_cols = min(10, len(images))
n_rows = math.ceil(len(images) / n_cols)
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
# plot first 20 images
#plot(in_train[:20], out_train[:20])
model = keras.Sequential(layers=[keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(in_train, out_train, batch_size=60, epochs=10, validation_split=0.2)
loss, accuracy = model.evaluate(in_valid, out_valid)
probs = model.predict(in_valid)
preds = model.predict(in_valid).argsort()[:, -1]
rand_idxs = np.random.permutation(len(in_valid))[:20]
plot(in_valid[rand_idxs], out_valid[rand_idxs], preds[rand_idxs]) | code |
89137453/cell_18 | [
"image_output_1.png"
] | from tensorflow import keras
import numpy as np
fashion_mnist = keras.datasets.fashion_mnist
(in_train, out_train), (in_valid, out_valid) = fashion_mnist.load_data()
(in_train.shape, in_valid.shape, np.unique(out_train))
in_train = in_train / 255.0
in_valid = in_valid / 255.0
model = keras.Sequential(layers=[keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(in_train, out_train, batch_size=60, epochs=10, validation_split=0.2)
loss, accuracy = model.evaluate(in_valid, out_valid)
probs = model.predict(in_valid)
print(probs.argmax(axis=1))
preds = model.predict(in_valid).argsort()[:, -1]
print(preds) | code |
89137453/cell_15 | [
"text_plain_output_1.png"
] | from tensorflow import keras
import numpy as np
fashion_mnist = keras.datasets.fashion_mnist
(in_train, out_train), (in_valid, out_valid) = fashion_mnist.load_data()
(in_train.shape, in_valid.shape, np.unique(out_train))
in_train = in_train / 255.0
in_valid = in_valid / 255.0
model = keras.Sequential(layers=[keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(in_train, out_train, batch_size=60, epochs=10, validation_split=0.2) | code |
89137453/cell_16 | [
"text_plain_output_1.png"
] | from tensorflow import keras
import numpy as np
fashion_mnist = keras.datasets.fashion_mnist
(in_train, out_train), (in_valid, out_valid) = fashion_mnist.load_data()
(in_train.shape, in_valid.shape, np.unique(out_train))
in_train = in_train / 255.0
in_valid = in_valid / 255.0
model = keras.Sequential(layers=[keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(in_train, out_train, batch_size=60, epochs=10, validation_split=0.2)
loss, accuracy = model.evaluate(in_valid, out_valid) | code |
89137453/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
(in_train.shape, in_valid.shape, np.unique(out_train))
in_train = in_train / 255.0
in_valid = in_valid / 255.0
class_names = {index: cn for index, cn in enumerate(['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'])}
row_number = 5
def plot_train(n_rows=2, predictions=None):
"""create a grid with 10 columns
"""
n_cols = 10
images = in_train[:n_cols * n_rows]
labels = out_train[:n_cols * n_rows]
fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols + 3, n_rows + 2))
if predictions is None:
predictions = [None] * len(labels)
for index, (x, y_true, y_pred) in enumerate(zip(images, labels, predictions)):
ax = axes.flat[index]
ax.imshow(x, cmap=plt.cm.binary)
ax.set_title(class_names[y_true])
if y_pred is not None:
ax.set_xlabel(class_names[y_pred])
ax.set_xticks([])
ax.set_yticks([])
plot_train(row_number) | code |
89137453/cell_5 | [
"image_output_1.png"
] | import numpy as np
(in_train.shape, in_valid.shape, np.unique(out_train)) | code |
18143474/cell_4 | [
"image_output_1.png"
] | import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
df_source = pd.read_csv('../input/periodic-traffic-data/periodic_traffic.csv')
df_source['rep_date'] = pd.to_datetime(df_source['_time'])
df_source.drop(['_time'], axis=1, inplace=True)
df_source_time = df_source.copy()
df_source_time['rep_time'] = df_source_time['rep_date'].apply(lambda x: dt.datetime.strptime(x.strftime('%H:%M'), '%H:%M'))
df_source_time.drop(['rep_date'], axis=1, inplace=True)
df_source = df_source.set_index('rep_date')
df_source_time = df_source_time.set_index('rep_time')
df_source_time['C9'].plot()
plt.show() | code |
18143474/cell_6 | [
"text_html_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import datetime as dt
import lowess as lo
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df_source = pd.read_csv('../input/periodic-traffic-data/periodic_traffic.csv')
df_source['rep_date'] = pd.to_datetime(df_source['_time'])
df_source.drop(['_time'], axis=1, inplace=True)
df_source_time = df_source.copy()
df_source_time['rep_time'] = df_source_time['rep_date'].apply(lambda x: dt.datetime.strptime(x.strftime('%H:%M'), '%H:%M'))
df_source_time.drop(['rep_date'], axis=1, inplace=True)
df_source = df_source.set_index('rep_date')
df_source_time = df_source_time.set_index('rep_time')
#Set the required variables
#The window for the rolling standard deviation is taken as, e.g., 1/12 of the period
v_window=8 #rolling window
k_out=1.5 #multiplier on std for the outlier-filtering bounds on the first pass
k_norm=1.5 #multiplier on std for the bounds of "normal" traffic on the second pass
i=df_source_time.index.shape[0]
x=np.linspace(-10,10,i)
#Helper function that masks values falling outside the outlier-filtering bounds
def f_out(x):
name=x.index[0]
if x[name] > x[name+'_lo']+k_out*x[name+'_std_first_step']:
x[name+'_adj']=np.nan
elif x[name] < x[name+'_lo']-k_out*x[name+'_std_first_step']:
x[name+'_adj']=np.nan
else:
x[name+'_adj']=x[name]
return x
#Data-processing function.
#Input: a Series object taken from the source data.
#Output: data with outliers removed ['lo'] and the standard deviation of the processed data ['std'].
def f_low(df_x):
df_res=DataFrame(df_x)
name=df_res.columns[0]
i=df_x.index.shape[0]
x=np.linspace(-10,10,i)
df_res[name+'_lo'] = lo.lowess(x, df_x.values, x)
df_res[name+'_std_first_step'] = df_x.rolling(window=v_window,min_periods=0).std().fillna(method='bfill').shift(-int(v_window/2))
df_res=df_res.apply(f_out,axis=1)
df_res[name+'_adj_first_step']=df_res[name+'_adj'].fillna(method='bfill')
df_res[name+'_adj'] = lo.lowess(x, np.array(df_res[name+'_adj_first_step']), x)
df_res[name+'_std'] = df_res[name+'_adj_first_step'].rolling(window=v_window,min_periods=0).std().fillna(method='bfill').shift(-int(v_window/2))
return df_res
l=list(df_source_time.columns)
print( "Список полученных для анализа фич:\n{}".format(l) )
for name in l:
df=f_low(df_source_time[name].sort_index(axis=0))
display(df.head())
fig,ax = plt.subplots(1,figsize=(12,9))
    ax.plot(df[name],'b.',label='Original') #original series
    ax.plot(df[name+'_lo']+k_out*df[name+'_std_first_step'],'g',label='Outlier-filtering bounds') #upper bound for outlier filtering
    ax.plot(df[name+'_lo']-k_out*df[name+'_std_first_step'],'g',label='Outlier-filtering bounds') #lower bound for outlier filtering
    ax.plot(df[name+'_lo'],'r', label='Series reconstructed on the first pass') #series reconstructed with lowess on the first pass
    ax.plot(df[name+'_adj']+k_norm*df[name+'_std'],'k', label='Upper bound of normal traffic') #upper bound of normal traffic
    ax.plot(df[name+'_adj']-k_norm*df[name+'_std'],'k', label='Lower bound of normal traffic') #lower bound of normal traffic
    ax.plot(df[name+'_adj'],'y', label='Series reconstructed on the second pass') #series reconstructed with lowess on the second pass
ax.set_title(name)
plt.legend()
plt.show()
for name in ['NAKA']:
df = f_low(df_source_time[name].sort_index(axis=0))
df[name + '_adj_avg'] = DataFrame(df[name + '_adj'].groupby(level=0).mean())
display(df.head())
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
ax1.plot(df[name], 'b.', label='Original')
    ax1.plot(df[name + '_adj_avg'] + k_norm * df[name + '_std'], 'k', label='Upper bound of normal traffic')
    ax1.plot(df[name + '_adj_avg'] - k_norm * df[name + '_std'], 'k', label='Lower bound of normal traffic')
    ax1.plot(df[name + '_adj_avg'], 'r', label='Series reconstructed on the second pass')
ax1.set_title(name)
ax1.legend()
ax2.plot(df_source[name])
ax2.legend()
plt.show() | code |
18143474/cell_7 | [
"text_html_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import datetime as dt
import lowess as lo
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df_source = pd.read_csv('../input/periodic-traffic-data/periodic_traffic.csv')
df_source['rep_date'] = pd.to_datetime(df_source['_time'])
df_source.drop(['_time'], axis=1, inplace=True)
df_source_time = df_source.copy()
df_source_time['rep_time'] = df_source_time['rep_date'].apply(lambda x: dt.datetime.strptime(x.strftime('%H:%M'), '%H:%M'))
df_source_time.drop(['rep_date'], axis=1, inplace=True)
df_source = df_source.set_index('rep_date')
df_source_time = df_source_time.set_index('rep_time')
#Set the required variables
#The window for the rolling standard deviation is taken as, e.g., 1/12 of the period
v_window=8 #rolling window
k_out=1.5 #multiplier on std for the outlier-filtering bounds on the first pass
k_norm=1.5 #multiplier on std for the bounds of "normal" traffic on the second pass
i=df_source_time.index.shape[0]
x=np.linspace(-10,10,i)
#Helper function that masks values falling outside the outlier-filtering bounds
def f_out(x):
name=x.index[0]
if x[name] > x[name+'_lo']+k_out*x[name+'_std_first_step']:
x[name+'_adj']=np.nan
elif x[name] < x[name+'_lo']-k_out*x[name+'_std_first_step']:
x[name+'_adj']=np.nan
else:
x[name+'_adj']=x[name]
return x
#Data-processing function.
#Input: a Series object taken from the source data.
#Output: data with outliers removed ['lo'] and the standard deviation of the processed data ['std'].
def f_low(df_x):
df_res=DataFrame(df_x)
name=df_res.columns[0]
i=df_x.index.shape[0]
x=np.linspace(-10,10,i)
df_res[name+'_lo'] = lo.lowess(x, df_x.values, x)
df_res[name+'_std_first_step'] = df_x.rolling(window=v_window,min_periods=0).std().fillna(method='bfill').shift(-int(v_window/2))
df_res=df_res.apply(f_out,axis=1)
df_res[name+'_adj_first_step']=df_res[name+'_adj'].fillna(method='bfill')
df_res[name+'_adj'] = lo.lowess(x, np.array(df_res[name+'_adj_first_step']), x)
df_res[name+'_std'] = df_res[name+'_adj_first_step'].rolling(window=v_window,min_periods=0).std().fillna(method='bfill').shift(-int(v_window/2))
return df_res
l=list(df_source_time.columns)
print( "Список полученных для анализа фич:\n{}".format(l) )
for name in l:
df=f_low(df_source_time[name].sort_index(axis=0))
display(df.head())
fig,ax = plt.subplots(1,figsize=(12,9))
    ax.plot(df[name],'b.',label='Original') #original series
    ax.plot(df[name+'_lo']+k_out*df[name+'_std_first_step'],'g',label='Outlier-filtering bounds') #upper bound for outlier filtering
    ax.plot(df[name+'_lo']-k_out*df[name+'_std_first_step'],'g',label='Outlier-filtering bounds') #lower bound for outlier filtering
    ax.plot(df[name+'_lo'],'r', label='Series reconstructed on the first pass') #series reconstructed with lowess on the first pass
    ax.plot(df[name+'_adj']+k_norm*df[name+'_std'],'k', label='Upper bound of normal traffic') #upper bound of normal traffic
    ax.plot(df[name+'_adj']-k_norm*df[name+'_std'],'k', label='Lower bound of normal traffic') #lower bound of normal traffic
    ax.plot(df[name+'_adj'],'y', label='Series reconstructed on the second pass') #series reconstructed with lowess on the second pass
ax.set_title(name)
plt.legend()
plt.show()
for name in ['NAKA']:
df=f_low(df_source_time[name].sort_index(axis=0))
df[name+'_adj_avg'] = DataFrame(df[name+'_adj'].groupby(level=0).mean())
display(df.head())
fig,(ax1,ax2) = plt.subplots(1,2,figsize=(24,9))
    ax1.plot(df[name],'b.',label='Original') #original series
    #ax1.plot(df[name+'_lo']+k_out*df[name+'_std_first_step'],'g',label='Outlier-filtering bounds') #upper bound for outlier filtering
    #ax1.plot(df[name+'_lo']-k_out*df[name+'_std_first_step'],'g',label='Outlier-filtering bounds') #lower bound for outlier filtering
    #ax1.plot(df[name+'_lo'],'r', label='Series reconstructed on the first pass') #series reconstructed with lowess on the first pass
    ax1.plot(df[name+'_adj_avg']+k_norm*df[name+'_std'],'k', label='Upper bound of normal traffic') #upper bound of normal traffic
    ax1.plot(df[name+'_adj_avg']-k_norm*df[name+'_std'],'k', label='Lower bound of normal traffic') #lower bound of normal traffic
    #ax1.plot(df[name+'_adj'],'y', label='Series reconstructed on the second pass') #series reconstructed with lowess on the second pass
    ax1.plot(df[name+'_adj_avg'],'r', label='Series reconstructed on the second pass') #series reconstructed with lowess on the second pass
ax1.set_title(name)
ax1.legend()
ax2.plot(df_source[name])
ax2.legend()
plt.show()
df_s1 = df_source.copy()
df_s1['rep_time'] = df_source.index.values
df_s1['rep_time'] = df_s1['rep_time'].apply(lambda x: dt.datetime.strptime(x.strftime('%H:%M'), '%H:%M'))
df_s2 = pd.merge(df_s1, df, how='left', left_on='rep_time', right_index=True)
display(df_s2.head())
df_s2['lower'] = df_s2['NAKA_adj_avg'] - df_s2['NAKA_std'] * k_norm
df_s2['upper'] = df_s2['NAKA_adj_avg'] + df_s2['NAKA_std'] * k_norm
df_s2[['NAKA_x', 'NAKA_adj_avg', 'lower', 'upper']].plot(figsize=(12, 9))
plt.show() | code |
18143474/cell_3 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | import datetime as dt
import pandas as pd
df_source = pd.read_csv('../input/periodic-traffic-data/periodic_traffic.csv')
df_source['rep_date'] = pd.to_datetime(df_source['_time'])
df_source.drop(['_time'], axis=1, inplace=True)
df_source_time = df_source.copy()
df_source_time['rep_time'] = df_source_time['rep_date'].apply(lambda x: dt.datetime.strptime(x.strftime('%H:%M'), '%H:%M'))
df_source_time.drop(['rep_date'], axis=1, inplace=True)
df_source = df_source.set_index('rep_date')
df_source_time = df_source_time.set_index('rep_time')
print('Rows found in the DataFrame:\n{}\n'.format(len(df_source.index)))
display(df_source.tail(3))
display(df_source_time.tail(3)) | code |
18143474/cell_5 | [
"image_output_11.png",
"text_html_output_10.png",
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_5.png",
"image_output_5.png",
"image_output_7.png",
"text_html_output_9.png",
"image_output_4.png",
"image_output_8.png",
"text_html_output_1.png",
"image_output_6.png",
"text_plain_output_1.png",
"text_html_output_11.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"text_html_output_8.png",
"text_html_output_3.png",
"image_output_9.png",
"text_html_output_7.png"
] | from pandas import DataFrame
import datetime as dt
import lowess as lo
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df_source = pd.read_csv('../input/periodic-traffic-data/periodic_traffic.csv')
df_source['rep_date'] = pd.to_datetime(df_source['_time'])
df_source.drop(['_time'], axis=1, inplace=True)
df_source_time = df_source.copy()
df_source_time['rep_time'] = df_source_time['rep_date'].apply(lambda x: dt.datetime.strptime(x.strftime('%H:%M'), '%H:%M'))
df_source_time.drop(['rep_date'], axis=1, inplace=True)
df_source = df_source.set_index('rep_date')
df_source_time = df_source_time.set_index('rep_time')
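# Rolling window size and std multipliers: k_out bounds the outlier filtering on the first pass,
# k_norm bounds the "normal" traffic band on the second pass (see the commented variants of this cell above)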
v_window = 8
k_out = 1.5
k_norm = 1.5
i = df_source_time.index.shape[0]
x = np.linspace(-10, 10, i)
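# Helper function that masks values falling outside the outlier-filtering bounds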
def f_out(x):
name = x.index[0]
if x[name] > x[name + '_lo'] + k_out * x[name + '_std_first_step']:
x[name + '_adj'] = np.nan
elif x[name] < x[name + '_lo'] - k_out * x[name + '_std_first_step']:
x[name + '_adj'] = np.nan
else:
x[name + '_adj'] = x[name]
return x
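# Data-processing function: takes a Series from the source data and returns it with outliers
# removed ('_lo', '_adj') plus the rolling standard deviation of the processed data ('_std')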
def f_low(df_x):
df_res = DataFrame(df_x)
name = df_res.columns[0]
i = df_x.index.shape[0]
x = np.linspace(-10, 10, i)
df_res[name + '_lo'] = lo.lowess(x, df_x.values, x)
df_res[name + '_std_first_step'] = df_x.rolling(window=v_window, min_periods=0).std().fillna(method='bfill').shift(-int(v_window / 2))
df_res = df_res.apply(f_out, axis=1)
df_res[name + '_adj_first_step'] = df_res[name + '_adj'].fillna(method='bfill')
df_res[name + '_adj'] = lo.lowess(x, np.array(df_res[name + '_adj_first_step']), x)
df_res[name + '_std'] = df_res[name + '_adj_first_step'].rolling(window=v_window, min_periods=0).std().fillna(method='bfill').shift(-int(v_window / 2))
return df_res
l = list(df_source_time.columns)
print('Features obtained for analysis:\n{}'.format(l))
for name in l:
df = f_low(df_source_time[name].sort_index(axis=0))
display(df.head())
fig, ax = plt.subplots(1, figsize=(12, 9))
ax.plot(df[name], 'b.', label='Original')
    ax.plot(df[name + '_lo'] + k_out * df[name + '_std_first_step'], 'g', label='Outlier-filtering bounds')
    ax.plot(df[name + '_lo'] - k_out * df[name + '_std_first_step'], 'g', label='Outlier-filtering bounds')
    ax.plot(df[name + '_lo'], 'r', label='Series reconstructed on the first pass')
    ax.plot(df[name + '_adj'] + k_norm * df[name + '_std'], 'k', label='Upper bound of normal traffic')
    ax.plot(df[name + '_adj'] - k_norm * df[name + '_std'], 'k', label='Lower bound of normal traffic')
    ax.plot(df[name + '_adj'], 'y', label='Series reconstructed on the second pass')
ax.set_title(name)
plt.legend()
plt.show() | code |
330145/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
df_train.head(2) | code |
330145/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
def data_cleanser(data, is_train):
def adjust_dates(dates, diff):
return dates - diff
if is_train:
df_dates = data['date_x']
diff = df_dates.max() - df_dates.min()
diff2 = df_dates.max() - pd.Timestamp(pd.datetime.now().date())
diffdays = diff + diff2
data['adj_date'] = adjust_dates(data['date_x'], diffdays)
return data.drop(['date_x'], axis=1)
data_cleanser(df_train, True).head() | code |
330145/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
for d in ['date_x', 'date_y']:
print('Start of ' + d + ': ' + str(df_train[d].min().date()))
print(' End of ' + d + ': ' + str(df_train[d].max().date()))
print('Range of ' + d + ': ' + str(df_train[d].max() - df_train[d].min()) + '\n') | code |
2020968/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
columns = ['SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train[columns].describe(include='all', percentiles=[]) | code |
2020968/cell_23 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])
def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
df_pivot = df.pivot_table(index=index, values='Survived')
if num_xticks > 0:
plt.xticks(range(num_xticks), xticks)
cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
def process_age(df, cut_points, label_names):
df['Age'] = df['Age'].fillna(-0.5)
df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
return df
train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)
def process_fare(df, cut_points, label_names):
df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
return df
train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
plot_survival(train, 'Fare_categories', use_index=False, num_xticks=len(train['Fare_categories'].unique()) - 1, xticks=train['Fare_categories'].unique().sort_values()) | code |