Dataset schema:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value)
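Each record below gives a notebook cell's path, the names of its rendered output screenshots, the cell's source code, and its cell type.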
88100444/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import random
import matplotlib.pyplot as plt

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.head()
code
88100444/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88100444/cell_7
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', '3Pper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)

# The original assigned the split twice in one chained statement; once is enough.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
bd_model.fit(train_X, train_y)

val_predictions = bd_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print(val_mae)
code
88100444/cell_8
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', '3Pper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)

# The original assigned the split twice in one chained statement; once is enough.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
bd_model.fit(train_X, train_y)

val_predictions = bd_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)

set_up = [[25.7, 58.3, 33.9, 12.5, 17.5, 16.7, 71.4]]
bd_model.predict(set_up)
code
88100444/cell_3
[ "image_output_1.png" ]
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')

figure(figsize=(8, 6), dpi=80)
corrMatrix = big_dance.corr()
sn.heatmap(corrMatrix, annot=False)
plt.show()
code
88100444/cell_5
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', '3Pper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)
code
1005328/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor
import pandas as pd

with open('../input/train.json') as train_json:
    raw_train = pd.read_json(train_json.read()).reset_index()

model = KNeighborsRegressor(n_neighbors=300)
price_df = pd.concat([raw_train['bedrooms'], raw_train['bathrooms'],
                      raw_train['latitude'], raw_train['longitude'],
                      raw_train['price']], axis=1)
model.fit(price_df.drop(['price'], axis=1), price_df['price'])

# A pandas Series has no .reshape; convert to a NumPy array first.
query = price_df.drop(['price'], axis=1).loc[2].values.reshape(1, -1)
print(model.kneighbors(query, n_neighbors=300))
code
1005328/cell_2
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor
import pandas as pd

with open('../input/train.json') as train_json:
    raw_train = pd.read_json(train_json.read()).reset_index()

model = KNeighborsRegressor(n_neighbors=300)
price_df = pd.concat([raw_train['bedrooms'], raw_train['bathrooms'],
                      raw_train['latitude'], raw_train['longitude'],
                      raw_train['price']], axis=1)
model.fit(price_df.drop(['price'], axis=1), price_df['price'])
code
1005328/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

# `pred_price_ratio` and `interest_level` come from an intermediate step that
# was not captured in this export (see the reconstruction sketch below).
new_price_df = price_df[price_df['pred_price_ratio'] < 4]
plt.figure(figsize=(10, 20))
sns.boxplot(x='interest_level', y='pred_price_ratio', data=new_price_df)
plt.title('Interest Level and Price / Predicted Price Ratio', fontsize=32)
plt.show()
code
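`pred_price_ratio` and `interest_level` are referenced in the cell above but created in no captured cell. A hedged reconstruction of the presumed intermediate step, reusing the KNN model fitted in the other 1005328 cells (the new column names are assumptions):

# Hypothetical reconstruction of the uncaptured step: the ratio of the listed
# price to the KNN-predicted price, plus the listing's interest level.
price_df['pred_price'] = model.predict(price_df.drop(['price'], axis=1))
price_df['pred_price_ratio'] = price_df['price'] / price_df['pred_price']
price_df['interest_level'] = raw_train['interest_level']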
1005328/cell_5
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor
import pandas as pd

with open('../input/train.json') as train_json:
    raw_train = pd.read_json(train_json.read()).reset_index()

model = KNeighborsRegressor(n_neighbors=300)
price_df = pd.concat([raw_train['bedrooms'], raw_train['bathrooms'],
                      raw_train['latitude'], raw_train['longitude'],
                      raw_train['price']], axis=1)
model.fit(price_df.drop(['price'], axis=1), price_df['price'])

print(price_df.drop(['price'], axis=1).loc[2])
print(price_df.drop(['price'], axis=1).loc[311])
code
33119806/cell_21
[ "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'neutral']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'positive']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'negative']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

# Keyword argument used here; passing the vector positionally is deprecated in seaborn 0.12+.
sns.countplot(x=df.sentiment)
code
33119806/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)
df.info()
code
33119806/cell_34
[ "text_plain_output_1.png" ]
model_outputs
code
33119806/cell_30
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from simpletransformers.classification import ClassificationModel
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from a train/eval split of `df`
# that was not captured in this export.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

model.train_model(train_df)
code
33119806/cell_33
[ "text_plain_output_4.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
result
code
33119806/cell_44
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from simpletransformers.classification import ClassificationModel
import numpy as np
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from an uncaptured train/eval split of `df`.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(eval_df)

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

def get_result(statement):
    result = model.predict([statement])
    pos = np.where(result[1][0] == np.amax(result[1][0]))
    pos = int(pos[0])
    sentiment_dict = {0: 'positive', 1: 'negative', 2: 'neutral'}
    # The return value was lost in extraction; returning the mapped
    # sentiment label is the presumed intent.
    return sentiment_dict[pos]

get_result("According to the company 's updated strategy for the years 2009-2012 , Basware targets a long-term net sales growth in the range of 20 % -40 % with an operating profit margin of 10 % -20 % of net sales .")
code
33119806/cell_40
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import sklearn

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train`, `eva`, and `model_outputs` come from cells not captured in this export.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

true = eval_df['label'].tolist()
predicted = lst

mat = sklearn.metrics.confusion_matrix(true, predicted)
mat
# making_label encodes 0=positive, 1=negative, 2=neutral, so target_names must
# follow that order (the original listed 'neutral' second).
sklearn.metrics.classification_report(true, predicted, target_names=['positive', 'negative', 'neutral'])
sklearn.metrics.accuracy_score(true, predicted)
code
33119806/cell_39
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import sklearn

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train`, `eva`, and `model_outputs` come from cells not captured in this export.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

true = eval_df['label'].tolist()
predicted = lst

mat = sklearn.metrics.confusion_matrix(true, predicted)
mat
# making_label encodes 0=positive, 1=negative, 2=neutral, so target_names must
# follow that order (the original listed 'neutral' second).
sklearn.metrics.classification_report(true, predicted, target_names=['positive', 'negative', 'neutral'])
code
33119806/cell_26
[ "text_plain_output_1.png" ]
!pip install simpletransformers
code
33119806/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.info()
code
33119806/cell_19
[ "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'neutral']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'positive']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'negative']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.figure(figsize=(8, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
33119806/cell_45
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from simpletransformers.classification import ClassificationModel
import numpy as np
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from an uncaptured train/eval split of `df`.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(eval_df)

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

def get_result(statement):
    result = model.predict([statement])
    pos = np.where(result[1][0] == np.amax(result[1][0]))
    pos = int(pos[0])
    sentiment_dict = {0: 'positive', 1: 'negative', 2: 'neutral'}
    # The return value was lost in extraction; returning the mapped
    # sentiment label is the presumed intent.
    return sentiment_dict[pos]

get_result('Sales in Finland decreased by 2.0 % , and international sales decreased by 9.3 % in terms of euros , and by 15.1 % in terms of local currencies .')
code
33119806/cell_18
[ "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'neutral']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'positive']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.figure(figsize=(8, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
33119806/cell_28
[ "text_plain_output_5.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from a train/eval split that was not
# captured in this export (see the reconstruction sketch below).
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)
print(train.shape)
code
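`train` and `eva` are used throughout the 33119806 cells but defined in none of them; the splitting cell evidently did not survive extraction. A minimal sketch of the presumed step, assuming a simple random split of the deduplicated `df` (the variable names come from the cells; the ratio and seed are assumptions):

from sklearn.model_selection import train_test_split

# Hypothetical reconstruction of the uncaptured split cell: hold out part of
# the dataframe for evaluation; 80/20 and the seed are assumptions.
train, eva = train_test_split(df, test_size=0.2, random_state=42)
train = train.reset_index(drop=True)
eva = eva.reset_index(drop=True)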
33119806/cell_8
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df.head()
code
33119806/cell_16
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.figure(figsize=(8, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
33119806/cell_38
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'neutral']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'positive']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'negative']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train`, `eva`, and `model_outputs` come from cells not captured in this export.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

true = eval_df['label'].tolist()
predicted = lst

mat = sklearn.metrics.confusion_matrix(true, predicted)
mat
df_cm = pd.DataFrame(mat, range(3), range(3))
sns.heatmap(df_cm, annot=True)
plt.show()
code
33119806/cell_17
[ "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'neutral']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.figure(figsize=(8, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
33119806/cell_43
[ "text_plain_output_1.png" ]
from simpletransformers.classification import ClassificationModel
import numpy as np
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from an uncaptured train/eval split of `df`.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(eval_df)

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

def get_result(statement):
    result = model.predict([statement])
    pos = np.where(result[1][0] == np.amax(result[1][0]))
    pos = int(pos[0])
    sentiment_dict = {0: 'positive', 1: 'negative', 2: 'neutral'}
    # The return value was lost in extraction; returning the mapped
    # sentiment label is the presumed intent.
    return sentiment_dict[pos]

get_result('According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .')
code
33119806/cell_31
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_8.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from simpletransformers.classification import ClassificationModel
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from an uncaptured train/eval split of `df`.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
code
33119806/cell_46
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from simpletransformers.classification import ClassificationModel
import numpy as np
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train` and `eva` are assumed to come from an uncaptured train/eval split of `df`.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(eval_df)

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

def get_result(statement):
    result = model.predict([statement])
    pos = np.where(result[1][0] == np.amax(result[1][0]))
    pos = int(pos[0])
    sentiment_dict = {0: 'positive', 1: 'negative', 2: 'neutral'}
    # The return value was lost in extraction; returning the mapped
    # sentiment label is the presumed intent.
    return sentiment_dict[pos]

statement = 'Give your statement'
get_result(statement)
code
33119806/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)
df.describe()
code
33119806/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.drop_duplicates(subset=['statement'], keep='first', inplace=True)

text = ' '.join([x for x in df.statement])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'neutral']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'positive']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

text = ' '.join([x for x in df.statement[df.sentiment == 'negative']])
wordcloud = WordCloud(background_color='white').generate(text)
plt.axis('off')

df['sentiment'].value_counts()
code
33119806/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
code
33119806/cell_27
[ "text_plain_output_1.png" ]
from simpletransformers.classification import ClassificationModel

model = ClassificationModel('bert', 'bert-base-cased', num_labels=3,
                            args={'reprocess_input_data': True, 'overwrite_output_dir': True},
                            use_cuda=False)
code
33119806/cell_37
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import sklearn

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')

def making_label(st):
    if st == 'positive':
        return 0
    elif st == 'neutral':
        return 2
    else:
        return 1

# `train`, `eva`, and `model_outputs` come from cells not captured in this export.
train['label'] = train['sentiment'].apply(making_label)
eva['label'] = eva['sentiment'].apply(making_label)

train_df = pd.DataFrame({'text': train['statement'][:1500].replace('\\n', ' ', regex=True),
                         'label': train['label'][:1500]})
eval_df = pd.DataFrame({'text': eva['statement'][-400:].replace('\\n', ' ', regex=True),
                        'label': eva['label'][-400:]})

lst = []
for arr in model_outputs:
    lst.append(np.argmax(arr))

true = eval_df['label'].tolist()
predicted = lst

mat = sklearn.metrics.confusion_matrix(true, predicted)
mat
code
33119806/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', encoding='ISO-8859-1')
df = df.rename(columns={'neutral': 'sentiment',
                        'According to Gran , the company has no plans to move all production to Russia , although that is where the company is growing .': 'statement'})
df.shape
df.describe()
code
90144662/cell_13
[ "text_plain_output_1.png" ]
from pdf2image import convert_from_path, convert_from_bytes
import easyocr

PDF_file = '../input/osbook/operating-system-concepts-9th-edition.pdf'
'\nPart #1 : Converting PDF to images\n'
pages = convert_from_path(PDF_file, dpi=100, thread_count=4)
type(pages[0])

image_counter = 1
for page in pages:
    filename = 'page_' + str(image_counter) + '.jpg'
    page.save(filename, 'JPEG')
    image_counter = image_counter + 1

reader = easyocr.Reader(['en'])
filelimit = image_counter - 1
outfile = 'out_text.txt'
text = ''
f = open(outfile, 'a')
for i in range(1, image_counter - 1):
    filename = 'page_' + str(i) + '.jpg'
    # paragraph expects a boolean; the original passed the string 'True',
    # which only works because any non-empty string is truthy.
    result = reader.readtext(filename, paragraph=True)
    for box in result:  # renamed from `i` to avoid shadowing the outer loop variable
        text = text + '\n' + box[1]

PDF_file = '../input/osbook/2011_EST_OS.pdf'
image_counter = 1
pages = convert_from_path(PDF_file, dpi=150, thread_count=4, last_page=200)
fileAll = []
for page in pages:
    filename = 'page_' + str(image_counter) + '.jpg'
    page.save(filename, 'JPEG')
    image_counter = image_counter + 1
    fileAll.append(filename)

reader = easyocr.Reader(['en'])
a = reader.readtext_batched(fileAll[:100], paragraph=True)
for i in a:
    print()
    for j in i:
        print(j)
code
90144662/cell_4
[ "text_plain_output_1.png" ]
!apt-get install poppler-utils -y
code
90144662/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90144662/cell_14
[ "text_plain_output_1.png" ]
from pdf2image import convert_from_path, convert_from_bytes
import easyocr

PDF_file = '../input/osbook/operating-system-concepts-9th-edition.pdf'
'\nPart #1 : Converting PDF to images\n'
pages = convert_from_path(PDF_file, dpi=100, thread_count=4)
type(pages[0])

image_counter = 1
for page in pages:
    filename = 'page_' + str(image_counter) + '.jpg'
    page.save(filename, 'JPEG')
    image_counter = image_counter + 1

reader = easyocr.Reader(['en'])
filelimit = image_counter - 1
outfile = 'out_text.txt'
text = ''
f = open(outfile, 'a')
for i in range(1, image_counter - 1):
    filename = 'page_' + str(i) + '.jpg'
    # paragraph expects a boolean; the original passed the string 'True'.
    result = reader.readtext(filename, paragraph=True)
    for box in result:  # renamed from `i` to avoid shadowing the outer loop variable
        text = text + '\n' + box[1]

PDF_file = '../input/osbook/2011_EST_OS.pdf'
image_counter = 1
pages = convert_from_path(PDF_file, dpi=150, thread_count=4, last_page=200)
fileAll = []
for page in pages:
    filename = 'page_' + str(image_counter) + '.jpg'
    page.save(filename, 'JPEG')
    image_counter = image_counter + 1
    fileAll.append(filename)

reader = easyocr.Reader(['en'])
a = reader.readtext_batched(fileAll[:100], paragraph=True)
for i in a:
    print()
    for j in i:
        print(j[1])
code
16164242/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold
import pandas as pd

TEST_SIZE = 0.33
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
RANDOM_STATE = 123
N_SPLITS = 3
VERBOSE = False
DATA_PATH = '../input'

def csv_path(dataset='train', data_path=DATA_PATH):
    return '{}/{}.csv'.format(data_path, dataset)

def read_data(dataset='train', data_path=DATA_PATH):
    index_col = None
    index_type = ['train', 'test']
    if dataset in index_type:
        index_col = 'id'
    data_path = csv_path(dataset, data_path=data_path)
    return pd.read_csv(data_path, index_col=index_col)

train = read_data('train')
test = read_data('test')

molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})

structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()

if FREE_MEMORY:
    del train
    del test

molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')

potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
    del potential_energy

id_feature = 'molecule_name'
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')

X = molecule_train[selected_features]
y = molecule_train[target_feature]

kfold = KFold(n_splits=N_SPLITS, random_state=RANDOM_STATE)
fold = 0
r2_scores = []
mse_scores = []
lin_reg = LinearRegression()

for in_index, oof_index in kfold.split(X, y):
    fold += 1
    print('- Training Fold: ({}/{})'.format(fold, N_SPLITS))
    X_in, X_oof = (X.loc[in_index], X.loc[oof_index])
    y_in, y_oof = (y.loc[in_index], y.loc[oof_index])
    lin_reg.fit(X_in, y_in)
    y_pred = lin_reg.predict(X_oof)
    r2 = r2_score(y_oof, y_pred)
    r2_scores.append(r2)
    mse_score = mean_squared_error(y_oof, y_pred)
    mse_scores.append(mse_score)
    print('\t Variance score: \t{:.4f}'.format(r2))
    print('\t Mean squared error: \t{:.4f}'.format(mse_score))
code
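One version caveat for the cell above: since scikit-learn 0.24, constructing `KFold` with a `random_state` but without `shuffle=True` raises a `ValueError`. On a recent environment the construction would need to be:

from sklearn.model_selection import KFold

# shuffle must be enabled for random_state to take effect (required since scikit-learn 0.24)
kfold = KFold(n_splits=N_SPLITS, shuffle=True, random_state=RANDOM_STATE)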
16164242/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

TEST_SIZE = 0.33
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
RANDOM_STATE = 123
N_SPLITS = 3
VERBOSE = False
DATA_PATH = '../input'

def csv_path(dataset='train', data_path=DATA_PATH):
    return '{}/{}.csv'.format(data_path, dataset)

def read_data(dataset='train', data_path=DATA_PATH):
    index_col = None
    index_type = ['train', 'test']
    if dataset in index_type:
        index_col = 'id'
    data_path = csv_path(dataset, data_path=data_path)
    return pd.read_csv(data_path, index_col=index_col)

train = read_data('train')
test = read_data('test')

molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})

structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()

if FREE_MEMORY:
    del train
    del test

molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')

potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
    del potential_energy

molecule_train.head()
code
16164242/cell_2
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression

print(os.listdir('../input'))
code
16164242/cell_18
[ "text_html_output_1.png" ]
import pandas as pd

TEST_SIZE = 0.33
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
RANDOM_STATE = 123
N_SPLITS = 3
VERBOSE = False
DATA_PATH = '../input'

def csv_path(dataset='train', data_path=DATA_PATH):
    return '{}/{}.csv'.format(data_path, dataset)

def read_data(dataset='train', data_path=DATA_PATH):
    index_col = None
    index_type = ['train', 'test']
    if dataset in index_type:
        index_col = 'id'
    data_path = csv_path(dataset, data_path=data_path)
    return pd.read_csv(data_path, index_col=index_col)

train = read_data('train')
test = read_data('test')

molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})

structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()

if FREE_MEMORY:
    del train
    del test

molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')

potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
    del potential_energy

id_feature = 'molecule_name'
target_feature = (set(molecule_train) - set(molecule_test)).pop()
selected_features = list(molecule_test)
selected_features.remove(id_feature)
selected_features.remove('atom')

print('Selected Features: \t{}'.format(selected_features))
print('Target Feature: \t{}'.format(target_feature))
print('Id Feature: \t\t{}'.format(id_feature))
code
16164242/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

DATA_PATH = '../input'

def csv_path(dataset='train', data_path=DATA_PATH):
    return '{}/{}.csv'.format(data_path, dataset)

def read_data(dataset='train', data_path=DATA_PATH):
    index_col = None
    index_type = ['train', 'test']
    if dataset in index_type:
        index_col = 'id'
    data_path = csv_path(dataset, data_path=data_path)
    return pd.read_csv(data_path, index_col=index_col)

train = read_data('train')
test = read_data('test')
code
16164242/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

TEST_SIZE = 0.33
FIGSIZE = (10, 6)
SAVE_PICKLE = True
FREE_MEMORY = True
RANDOM_STATE = 123
N_SPLITS = 3
VERBOSE = False
DATA_PATH = '../input'

def csv_path(dataset='train', data_path=DATA_PATH):
    return '{}/{}.csv'.format(data_path, dataset)

def read_data(dataset='train', data_path=DATA_PATH):
    index_col = None
    index_type = ['train', 'test']
    if dataset in index_type:
        index_col = 'id'
    data_path = csv_path(dataset, data_path=data_path)
    return pd.read_csv(data_path, index_col=index_col)

train = read_data('train')
test = read_data('test')

molecule_train = pd.DataFrame({'molecule_name': train['molecule_name'].unique()})
molecule_test = pd.DataFrame({'molecule_name': test['molecule_name'].unique()})

structures = read_data('structures')
atom_list_df = structures.groupby('molecule_name')['atom'].apply(list)
atom_list_df = atom_list_df.to_frame()

if FREE_MEMORY:
    del train
    del test

molecule_train = pd.merge(molecule_train, atom_list_df, how='left', on='molecule_name')
molecule_test = pd.merge(molecule_test, atom_list_df, how='left', on='molecule_name')

potential_energy = read_data('potential_energy')
molecule_train = pd.merge(molecule_train, potential_energy)
if FREE_MEMORY:
    del potential_energy

plt.figure(figsize=FIGSIZE)
molecule_train['potential_energy'].plot(kind='kde')
plt.show()
code
18154734/cell_9
[ "image_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
LANG_FILENAMES[0:5]

data_lm = TextLMDataBunch.from_csv(LM_PATH, 'wiki_bangla_corpus.csv', text_cols='text')
learner = language_model_learner(data_lm, AWD_LSTM, pretrained=False, metrics=accuracy)
learner.lr_find()
learner.recorder.plot()
code
18154734/cell_6
[ "text_html_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
LANG_FILENAMES[0:5]

data_lm = TextLMDataBunch.from_csv(LM_PATH, 'wiki_bangla_corpus.csv', text_cols='text')
data_lm.show_batch()
code
18154734/cell_2
[ "image_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
print(len(LANG_FILENAMES))
LANG_FILENAMES[0:5]
code
18154734/cell_11
[ "text_html_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
LANG_FILENAMES[0:5]

data_lm = TextLMDataBunch.from_csv(LM_PATH, 'wiki_bangla_corpus.csv', text_cols='text')
learner = language_model_learner(data_lm, AWD_LSTM, pretrained=False, metrics=accuracy)
learner.lr_find()
learner.fit_one_cycle(15, 0.02)
learner.recorder.plot_losses()
code
18154734/cell_8
[ "image_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
LANG_FILENAMES[0:5]

data_lm = TextLMDataBunch.from_csv(LM_PATH, 'wiki_bangla_corpus.csv', text_cols='text')
learner = language_model_learner(data_lm, AWD_LSTM, pretrained=False, metrics=accuracy)
learner.lr_find()
code
18154734/cell_10
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
LANG_FILENAMES[0:5]

data_lm = TextLMDataBunch.from_csv(LM_PATH, 'wiki_bangla_corpus.csv', text_cols='text')
learner = language_model_learner(data_lm, AWD_LSTM, pretrained=False, metrics=accuracy)
learner.lr_find()
learner.fit_one_cycle(15, 0.02)
code
18154734/cell_12
[ "text_plain_output_1.png" ]
from fastai.text import *
import html
import json
from sklearn.model_selection import train_test_split

BOS = 'xbos'
FLD = 'xfld'
PATH = Path('/kaggle/input/lolol/lolol')
LM_PATH = Path('/temp')
LM_PATH.mkdir(exist_ok=True)

LANG_FILENAMES = [str(f) for f in PATH.rglob('*/*')]
LANG_FILENAMES[0:5]

data_lm = TextLMDataBunch.from_csv(LM_PATH, 'wiki_bangla_corpus.csv', text_cols='text')
learner = language_model_learner(data_lm, AWD_LSTM, pretrained=False, metrics=accuracy)
learner.lr_find()
learner.fit_one_cycle(15, 0.02)
learner.recorder.plot_metrics()
code
33105253/cell_9
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.info()
code
33105253/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.head()
code
33105253/cell_2
[ "text_html_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import seaborn as sns
from collections import Counter

warnings.filterwarnings('ignore')

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33105253/cell_7
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "text_plain_output_3.png", "image_output_4.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.describe()
code
33105253/cell_17
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import seaborn as sns
from collections import Counter

warnings.filterwarnings('ignore')

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns

def barPlot(feature):
    temp = train[feature]
    tempValue = temp.value_counts()
    plt.xticks(tempValue.index, tempValue.index.values)

def plotHist(feature):
    # The body of this helper was stripped in extraction; only the stub remains.
    pass

numeric = ['Fare', 'Age', 'PassengerId']
for n in numeric:
    plotHist(n)
code
33105253/cell_14
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import seaborn as sns
from collections import Counter

warnings.filterwarnings('ignore')

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns

def barPlot(feature):
    temp = train[feature]
    tempValue = temp.value_counts()
    plt.xticks(tempValue.index, tempValue.index.values)

category = ['Survived', 'Sex', 'Pclass', 'Embarked', 'SibSp', 'Parch']
for c in category:
    barPlot(c)
code
33105253/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
code
34121307/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
train.isnull().sum()

train.drop(['Dates', 'Category', 'Descript', 'DayOfWeek', 'Address'], inplace=True, axis=1)
test.drop(['Id', 'Dates', 'DayOfWeek', 'Address'], inplace=True, axis=1)
train.nunique()

# The original reused le_PdDistrict for every column, so each fit overwrote the
# previous one and the later transform() calls on `test` would fail. Each
# column gets its own encoder here.
le_PdDistrict = LabelEncoder()
le_Resolution = LabelEncoder()
le_X = LabelEncoder()
le_Y = LabelEncoder()

train['PdDistrict'] = le_PdDistrict.fit_transform(train['PdDistrict'])
train['Resolution'] = le_Resolution.fit_transform(train['Resolution'])
train['X'] = le_X.fit_transform(train['X'])
train['Y'] = le_Y.fit_transform(train['Y'])

test['PdDistrict'] = le_PdDistrict.transform(test['PdDistrict'])
test['X'] = le_X.transform(test['X'])
test['Y'] = le_Y.transform(test['Y'])
code
34121307/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
train.head()
code
34121307/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
test.info()
code
34121307/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34121307/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
train.isnull().sum()

train.drop(['Dates', 'Category', 'Descript', 'DayOfWeek', 'Address'], inplace=True, axis=1)
test.drop(['Id', 'Dates', 'DayOfWeek', 'Address'], inplace=True, axis=1)
train.nunique()
train.head()
code
34121307/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
train.isnull().sum()

print(train['Resolution'].unique())
len(train['Resolution'].unique())
code
34121307/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
test.head()
code
34121307/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
train.isnull().sum()

train.drop(['Dates', 'Category', 'Descript', 'DayOfWeek', 'Address'], inplace=True, axis=1)
test.drop(['Id', 'Dates', 'DayOfWeek', 'Address'], inplace=True, axis=1)
print(test.nunique())
train.nunique()
code
34121307/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/sf-crime/train.csv.zip')
test = pd.read_csv('/kaggle/input/sf-crime/test.csv.zip')
train.isnull().sum()
code
1007790/cell_2
[ "image_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = (np.random.rand(n_samples, 2), None)

colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = ['MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
                    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
                    'DBSCAN', 'Birch']
plot_num = 1

c = [[0, 0]]
a = [[0, 0]]
b = [[0, 0]]
d = [[0, 0]]
mu = 0.3
for x, y in [(0, 0.45), (0.9, 0.5), (0.45, 0.9), (0.45, 0)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    a = np.vstack((a, nums))
for x, y in [(0, 0.2), (0.8, 0), (0.2, 1), (1, 0.8)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    b = np.vstack((b, nums))
for x, y in [(0, 0), (0.9, 0.9), (0, 0.9), (0.9, 0)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    c = np.vstack((c, nums))
for x, y in [(0, 0), (0.9, 0), (0.45, 0.779), (0.45, 0.259)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    d = np.vstack((d, nums))

plt.scatter(d[:, 0], d[:, 1])
plt.show()

c = (c, None)
a = (a, None)
b = (b, None)
d = (d, None)
code
1007790/cell_1
[ "text_plain_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = (np.random.rand(n_samples, 2), None)

colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = ['MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
                    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
                    'DBSCAN', 'Birch']

plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=0.05, hspace=0.01)
plot_num = 1
code
1007790/cell_3
[ "image_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = (np.random.rand(n_samples, 2), None)

colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = ['MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
                    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
                    'DBSCAN', 'Birch']
plot_num = 1

c = [[0, 0]]
a = [[0, 0]]
b = [[0, 0]]
d = [[0, 0]]
mu = 0.3
for x, y in [(0, 0.45), (0.9, 0.5), (0.45, 0.9), (0.45, 0)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    a = np.vstack((a, nums))
for x, y in [(0, 0.2), (0.8, 0), (0.2, 1), (1, 0.8)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    b = np.vstack((b, nums))
for x, y in [(0, 0), (0.9, 0.9), (0, 0.9), (0.9, 0)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    c = np.vstack((c, nums))
for x, y in [(0, 0), (0.9, 0), (0.45, 0.779), (0.45, 0.259)]:
    num1 = np.random.normal(x, mu, n_samples)
    num2 = np.random.normal(y, mu, n_samples)
    nums = np.vstack((num1, num2)).T
    d = np.vstack((d, nums))

c = (c, None)
a = (a, None)
b = (b, None)
d = (d, None)

datasets = [d, a, b, c]  # note: shadows the sklearn `datasets` module from here on
for i_dataset, dataset in enumerate(datasets):
    X, y = dataset
    X = StandardScaler().fit_transform(X)
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
    connectivity = 0.5 * (connectivity + connectivity.T)

    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=4)
    ward = cluster.AgglomerativeClustering(n_clusters=4, linkage='ward', connectivity=connectivity)
    spectral = cluster.SpectralClustering(n_clusters=4, eigen_solver='arpack', affinity='nearest_neighbors')
    dbscan = cluster.DBSCAN(eps=0.23, min_samples=30)
    affinity_propagation = cluster.AffinityPropagation(damping=0.9, preference=-200)
    average_linkage = cluster.AgglomerativeClustering(linkage='average', affinity='cityblock',
                                                      n_clusters=4, connectivity=connectivity)
    birch = cluster.Birch(n_clusters=4)
    clustering_algorithms = [two_means, affinity_propagation, ms, spectral,
                             ward, average_linkage, dbscan, birch]

    for name, algorithm in zip(clustering_names, clustering_algorithms):
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(int)  # np.int was removed in NumPy 1.24
        else:
            y_pred = algorithm.predict(X)
        plt.subplot(4, len(clustering_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
        if hasattr(algorithm, 'cluster_centers_'):
            centers = algorithm.cluster_centers_
            center_colors = colors[:len(centers)]
            plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
        plt.xlim(-2, 2)
        plt.ylim(-2, 2)
        plt.xticks(())
        plt.yticks(())
        plt.text(0.99, 0.01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15, horizontalalignment='right')
        plot_num += 1

plt.show()
code
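A second version caveat for the loop above, assuming a recent environment: scikit-learn 1.2 renamed AgglomerativeClustering's `affinity` keyword to `metric` (the old name was removed in 1.4). A compatible construction of the average-linkage model, reusing the `connectivity` graph built inside the loop, would be:

from sklearn import cluster

# `metric` replaces the removed `affinity` keyword; behavior is otherwise unchanged.
average_linkage = cluster.AgglomerativeClustering(
    linkage='average', metric='cityblock', n_clusters=4, connectivity=connectivity)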
89127043/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

train_df = pd.read_csv('../input/santander-customer-satisfaction/train.csv')
test_df = pd.read_csv('../input/santander-customer-satisfaction/test.csv')

# Undersample the majority class (TARGET == 0) down to the size of the minority class.
sample_0 = np.where(train_df.TARGET == 0)[0]
sample_size = train_df.TARGET.value_counts()[1]
sample_loc = list(np.random.choice(sample_0, sample_size))
target_1_loc = list(np.where(train_df.TARGET == 1)[0])
sample_loc.extend(target_1_loc)
sample_loc.__len__()
code
89127043/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

train_df = pd.read_csv('../input/santander-customer-satisfaction/train.csv')
test_df = pd.read_csv('../input/santander-customer-satisfaction/test.csv')
# Class balance of the target. This pairing relies on TARGET.unique() and
# TARGET.value_counts() listing the classes in the same order (0 first here).
plt.figure(figsize=(10, 10))
sns.barplot(x=train_df.TARGET.unique(), y=train_df.TARGET.value_counts(), palette='Pastel1')
code
320432/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
full.Embarked.value_counts().plot(kind='bar')
code
320432/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# Assumes a CabinSide column was derived from Cabin in an earlier cell
# that is not captured in this snapshot.
titanic.CabinSide.value_counts()
code
320432/cell_20
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
test[np.isnan(test['Fare'])]
# Assumes a Title column was extracted from Name in an earlier cell
# that is not captured in this snapshot.
full[np.isnan(full['Age'])].Title.unique()
code
320432/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
titanic.info()
print('-' * 40)
test.info()
code
320432/cell_39
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# Impute the single missing Fare with the median fare for the same
# embarkation port and passenger class.
test.loc[test['PassengerId'] == 1044, 'Fare'] = full[(full['Embarked'] == 'S') & (full['Pclass'] == 3)].Fare.median()
test.loc[test['PassengerId'] == 1044, :]
test.loc[test['Name'].str.contains('Bowen,'), 'Cabin'] = 'B68'
test.loc[test.Cabin.str.len() == 5, :]
code
320432/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# CabinSide and Deck are assumed to have been derived from Cabin in
# earlier cells that are not captured in this snapshot.
titanic.CabinSide.value_counts()
titanic.loc[titanic.Cabin.str.len() == 5, :]
titanic.Deck.value_counts()
code
320432/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# Assumes a Title column was extracted from Name in an earlier cell
# that is not captured in this snapshot.
full['Title'].value_counts()
code
320432/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
code
320432/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
full.head()
code
320432/cell_43
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# CabinSide and Deck are assumed to have been derived from Cabin in
# earlier cells that are not captured in this snapshot.
titanic.CabinSide.value_counts()
titanic.loc[titanic.Cabin.str.len() == 5, :]
titanic.Deck.value_counts()
titanic.loc[titanic['Deck'] == 'T', :]
code
320432/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# Impute the single missing Fare with the median fare for the same
# embarkation port and passenger class.
test.loc[test['PassengerId'] == 1044, 'Fare'] = full[(full['Embarked'] == 'S') & (full['Pclass'] == 3)].Fare.median()
test.loc[test['PassengerId'] == 1044, :]
code
320432/cell_22
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# Assumes a Title column was extracted from Name in an earlier cell
# that is not captured in this snapshot.
full[full['Title'] == 'Ms']
code
320432/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
full.Embarked.value_counts()
titanic.Embarked.fillna(value='S', inplace=True)
full.Embarked.fillna(value='S', inplace=True)
# CabinSide is assumed to have been derived from Cabin in an earlier cell
# that is not captured in this snapshot.
titanic.CabinSide.value_counts()
titanic.loc[titanic.Cabin.str.len() == 5, :]
code
320432/cell_12
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

titanic = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([titanic, test])
test[np.isnan(test['Fare'])]
code
32062669/cell_21
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import PIL.Image  # required by LeafDataset below; missing from the original cell
import torch

gpu_status = torch.cuda.is_available()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)

class LeafDataset(Dataset):

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        img_src = '../input/plant-pathology-2020-fgvc7/images/' + self.df.loc[idx, 'image_id'] + '.jpg'
        image = PIL.Image.open(img_src).convert('RGB')
        if self.transform:
            image = self.transform(image)
        if self.df.shape[1] == 5:
            # Train rows carry one-hot label columns; collapse them to a class index.
            labels = self.df.loc[idx, ['healthy', 'multiple_diseases', 'rust', 'scab']].values
            labels = torch.from_numpy(labels.astype(np.uint8))
            labels = labels.unsqueeze(-1).long()
            labels = labels.numpy().tolist().index([1])
            labels = torch.from_numpy(np.asarray(labels))
            return (image, labels)
        else:
            return image

leaf_sample_dataset = LeafDataset(df=train_df, transform=None)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    img, label = leaf_sample_dataset[i]
    ax[i].imshow(img)
    print(type(img), img.size, label)
leaf_transform = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop((384, 384)), transforms.RandomAffine(degrees=15), transforms.RandomHorizontalFlip(p=0.4), transforms.RandomVerticalFlip(p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
leaf_train_dataset = LeafDataset(df=train_df, transform=leaf_transform)
leaf_train_loader = DataLoader(leaf_train_dataset, shuffle=True, batch_size=16)
images, labels = next(iter(leaf_train_loader))
test_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/test.csv')
leaf_test_dataset = LeafDataset(df=test_df, transform=leaf_transform)
leaf_test_loader = DataLoader(leaf_test_dataset, batch_size=64)
test_images = next(iter(leaf_test_loader))
print(len(leaf_test_dataset))
code
32062669/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)
# LeafDataset is assumed to have been defined in an earlier cell
# that is not captured in this snapshot.
leaf_sample_dataset = LeafDataset(df=train_df, transform=None)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    img, label = leaf_sample_dataset[i]
    ax[i].imshow(img)
    print(type(img), img.size, label)
code
32062669/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
train_df_copy.drop(['healthy', 'multiple_diseases', 'rust', 'scab'], axis=1, inplace=True)
train_df_copy.head(5)
code
32062669/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')
train_df.head(5)
code
32062669/cell_23
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import PIL.Image  # required by LeafDataset below; missing from the original cell
import torch

gpu_status = torch.cuda.is_available()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)

class LeafDataset(Dataset):

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        img_src = '../input/plant-pathology-2020-fgvc7/images/' + self.df.loc[idx, 'image_id'] + '.jpg'
        image = PIL.Image.open(img_src).convert('RGB')
        if self.transform:
            image = self.transform(image)
        if self.df.shape[1] == 5:
            # Train rows carry one-hot label columns; collapse them to a class index.
            labels = self.df.loc[idx, ['healthy', 'multiple_diseases', 'rust', 'scab']].values
            labels = torch.from_numpy(labels.astype(np.uint8))
            labels = labels.unsqueeze(-1).long()
            labels = labels.numpy().tolist().index([1])
            labels = torch.from_numpy(np.asarray(labels))
            return (image, labels)
        else:
            return image

leaf_sample_dataset = LeafDataset(df=train_df, transform=None)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    img, label = leaf_sample_dataset[i]
    ax[i].imshow(img)
    print(type(img), img.size, label)
leaf_transform = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop((384, 384)), transforms.RandomAffine(degrees=15), transforms.RandomHorizontalFlip(p=0.4), transforms.RandomVerticalFlip(p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
leaf_train_dataset = LeafDataset(df=train_df, transform=leaf_transform)
leaf_train_loader = DataLoader(leaf_train_dataset, shuffle=True, batch_size=16)
images, labels = next(iter(leaf_train_loader))
# 80/20 train/validation split via index samplers.
dataset_size = len(leaf_train_dataset)
indices = list(range(dataset_size))
np.random.shuffle(indices)
split = int(np.floor(0.2 * dataset_size))
train_idx, val_idx = (indices[split:], indices[:split])
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(val_idx)
leaf_train_loader = DataLoader(leaf_train_dataset, sampler=train_sampler, batch_size=64)
leaf_valid_loader = DataLoader(leaf_train_dataset, sampler=valid_sampler, batch_size=64)
test_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/test.csv')
leaf_test_dataset = LeafDataset(df=test_df, transform=leaf_transform)
leaf_test_loader = DataLoader(leaf_test_dataset, batch_size=64)
test_images = next(iter(leaf_test_loader))
diagnosis = ['healthy', 'multiple_diseases', 'rust', 'scab']
train_images, train_labels = next(iter(leaf_train_loader))
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(8):
    ax = fig.add_subplot(2, 8, idx + 1, xticks=[], yticks=[])  # was 16 / 2, a float that newer matplotlib rejects
    plt.imshow(train_images[idx].numpy().transpose(1, 2, 0))
    # The original titled with `labels` from a stale earlier batch; use this batch's labels.
    ax.set_title(diagnosis[train_labels[idx]])
code
32062669/cell_30
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision import transforms
import datetime
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import PIL.Image  # required by LeafDataset below; missing from the original cell
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

gpu_status = torch.cuda.is_available()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)

class LeafDataset(Dataset):

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        img_src = '../input/plant-pathology-2020-fgvc7/images/' + self.df.loc[idx, 'image_id'] + '.jpg'
        image = PIL.Image.open(img_src).convert('RGB')
        if self.transform:
            image = self.transform(image)
        if self.df.shape[1] == 5:
            # Train rows carry one-hot label columns; collapse them to a class index.
            labels = self.df.loc[idx, ['healthy', 'multiple_diseases', 'rust', 'scab']].values
            labels = torch.from_numpy(labels.astype(np.uint8))
            labels = labels.unsqueeze(-1).long()
            labels = labels.numpy().tolist().index([1])
            labels = torch.from_numpy(np.asarray(labels))
            return (image, labels)
        else:
            return image

leaf_sample_dataset = LeafDataset(df=train_df, transform=None)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    img, label = leaf_sample_dataset[i]
    ax[i].imshow(img)
    print(type(img), img.size, label)
leaf_transform = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop((384, 384)), transforms.RandomAffine(degrees=15), transforms.RandomHorizontalFlip(p=0.4), transforms.RandomVerticalFlip(p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
leaf_train_dataset = LeafDataset(df=train_df, transform=leaf_transform)
leaf_train_loader = DataLoader(leaf_train_dataset, shuffle=True, batch_size=16)
images, labels = next(iter(leaf_train_loader))
# 80/20 train/validation split via index samplers.
dataset_size = len(leaf_train_dataset)
indices = list(range(dataset_size))
np.random.shuffle(indices)
split = int(np.floor(0.2 * dataset_size))
train_idx, val_idx = (indices[split:], indices[:split])
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(val_idx)
leaf_train_loader = DataLoader(leaf_train_dataset, sampler=train_sampler, batch_size=64)
leaf_valid_loader = DataLoader(leaf_train_dataset, sampler=valid_sampler, batch_size=64)
test_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/test.csv')
leaf_test_dataset = LeafDataset(df=test_df, transform=leaf_transform)
leaf_test_loader = DataLoader(leaf_test_dataset, batch_size=64)
test_images = next(iter(leaf_test_loader))
diagnosis = ['healthy', 'multiple_diseases', 'rust', 'scab']
train_images, train_labels = next(iter(leaf_train_loader))
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(8):
    ax = fig.add_subplot(2, 8, idx + 1, xticks=[], yticks=[])  # was 16 / 2, a float that newer matplotlib rejects
    plt.imshow(train_images[idx].numpy().transpose(1, 2, 0))
    # The original titled with `labels` from a stale earlier batch; use this batch's labels.
    ax.set_title(diagnosis[train_labels[idx]])

class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
        self.conv2 = nn.Conv2d(8, 16, 3, padding=1)
        self.conv3 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv4 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv5 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv6 = nn.Conv2d(128, 256, 2, padding=1)
        self.conv7 = nn.Conv2d(256, 512, 2, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        # With 384x384 inputs, the conv/pool stack leaves a 12x12x512 feature map.
        self.fc1 = nn.Linear(12 * 12 * 512, 2048)
        self.fc2 = nn.Linear(2048, 4)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool2(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.pool2(F.relu(self.conv4(x)))
        x = self.pool2(F.relu(self.conv5(x)))
        x = self.pool2(F.relu(self.conv6(x)))
        x = self.pool2(F.relu(self.conv7(x)))
        x = x.view(-1, 12 * 12 * 512)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

model = Net()
if gpu_status:
    model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
no_epochs = 40
valid_loss_min = np.inf  # np.Inf is a deprecated alias
curr_time = datetime.datetime.now()
curr_timestamp = str(datetime.datetime.now())  # note: contains spaces and colons; fine on Linux
for epoch in range(1, no_epochs + 1):
    train_loss = 0.0
    valid_loss = 0.0
    model.train()
    for data, target in leaf_train_loader:
        if gpu_status:
            data = data.cuda()
            target = target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * data.size(0)
    model.eval()
    # (wrapping this loop in torch.no_grad() would save memory)
    for data, target in leaf_valid_loader:
        if gpu_status:
            data = data.cuda()
            target = target.cuda()
        output = model(data)
        loss = criterion(output, target)
        valid_loss += loss.item() * data.size(0)
    # Note: both averages divide by the full dataset size, although each
    # sampler covers only part of it; kept as in the original.
    train_loss = train_loss / len(leaf_train_loader.dataset)
    valid_loss = valid_loss / len(leaf_valid_loader.dataset)
    if valid_loss < valid_loss_min:
        torch.save(model.state_dict(), 'Kaggle_kernel_model_apple_leaf' + curr_timestamp + '.pt')
        valid_loss_min = valid_loss
file_name = 'Kaggle_kernel_model_apple_leaf' + str(curr_timestamp)
model.load_state_dict(torch.load(file_name + '.pt'))
code
32062669/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import PIL.Image  # required by LeafDataset below; missing from the original cell
import torch

gpu_status = torch.cuda.is_available()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)

class LeafDataset(Dataset):

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        img_src = '../input/plant-pathology-2020-fgvc7/images/' + self.df.loc[idx, 'image_id'] + '.jpg'
        image = PIL.Image.open(img_src).convert('RGB')
        if self.transform:
            image = self.transform(image)
        if self.df.shape[1] == 5:
            # Train rows carry one-hot label columns; collapse them to a class index.
            labels = self.df.loc[idx, ['healthy', 'multiple_diseases', 'rust', 'scab']].values
            labels = torch.from_numpy(labels.astype(np.uint8))
            labels = labels.unsqueeze(-1).long()
            labels = labels.numpy().tolist().index([1])
            labels = torch.from_numpy(np.asarray(labels))
            return (image, labels)
        else:
            return image

leaf_sample_dataset = LeafDataset(df=train_df, transform=None)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    img, label = leaf_sample_dataset[i]
    ax[i].imshow(img)
    print(type(img), img.size, label)
leaf_transform = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop((384, 384)), transforms.RandomAffine(degrees=15), transforms.RandomHorizontalFlip(p=0.4), transforms.RandomVerticalFlip(p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
leaf_train_dataset = LeafDataset(df=train_df, transform=leaf_transform)
leaf_train_loader = DataLoader(leaf_train_dataset, shuffle=True, batch_size=16)
images, labels = next(iter(leaf_train_loader))
test_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/test.csv')
leaf_test_dataset = LeafDataset(df=test_df, transform=leaf_transform)
leaf_test_loader = DataLoader(leaf_test_dataset, batch_size=64)
test_images = next(iter(leaf_test_loader))
print(len(test_images))
print(test_images[0].shape)
plt.imshow(test_images[2].numpy().transpose((1, 2, 0)))
code
32062669/cell_29
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')
test_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/test.csv')
test_df.head(5)
code
32062669/cell_26
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision import transforms
import datetime
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import PIL.Image  # required by LeafDataset below; missing from the original cell
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

gpu_status = torch.cuda.is_available()
train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)

class LeafDataset(Dataset):

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        img_src = '../input/plant-pathology-2020-fgvc7/images/' + self.df.loc[idx, 'image_id'] + '.jpg'
        image = PIL.Image.open(img_src).convert('RGB')
        if self.transform:
            image = self.transform(image)
        if self.df.shape[1] == 5:
            # Train rows carry one-hot label columns; collapse them to a class index.
            labels = self.df.loc[idx, ['healthy', 'multiple_diseases', 'rust', 'scab']].values
            labels = torch.from_numpy(labels.astype(np.uint8))
            labels = labels.unsqueeze(-1).long()
            labels = labels.numpy().tolist().index([1])
            labels = torch.from_numpy(np.asarray(labels))
            return (image, labels)
        else:
            return image

leaf_sample_dataset = LeafDataset(df=train_df, transform=None)
fig, ax = plt.subplots(1, 3)
for i in range(3):
    img, label = leaf_sample_dataset[i]
    ax[i].imshow(img)
    print(type(img), img.size, label)
leaf_transform = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop((384, 384)), transforms.RandomAffine(degrees=15), transforms.RandomHorizontalFlip(p=0.4), transforms.RandomVerticalFlip(p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
leaf_train_dataset = LeafDataset(df=train_df, transform=leaf_transform)
leaf_train_loader = DataLoader(leaf_train_dataset, shuffle=True, batch_size=16)
images, labels = next(iter(leaf_train_loader))
# 80/20 train/validation split via index samplers.
dataset_size = len(leaf_train_dataset)
indices = list(range(dataset_size))
np.random.shuffle(indices)
split = int(np.floor(0.2 * dataset_size))
train_idx, val_idx = (indices[split:], indices[:split])
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(val_idx)
leaf_train_loader = DataLoader(leaf_train_dataset, sampler=train_sampler, batch_size=64)
leaf_valid_loader = DataLoader(leaf_train_dataset, sampler=valid_sampler, batch_size=64)
test_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/test.csv')
leaf_test_dataset = LeafDataset(df=test_df, transform=leaf_transform)
leaf_test_loader = DataLoader(leaf_test_dataset, batch_size=64)
test_images = next(iter(leaf_test_loader))
diagnosis = ['healthy', 'multiple_diseases', 'rust', 'scab']
train_images, train_labels = next(iter(leaf_train_loader))
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(8):
    ax = fig.add_subplot(2, 8, idx + 1, xticks=[], yticks=[])  # was 16 / 2, a float that newer matplotlib rejects
    plt.imshow(train_images[idx].numpy().transpose(1, 2, 0))
    # The original titled with `labels` from a stale earlier batch; use this batch's labels.
    ax.set_title(diagnosis[train_labels[idx]])

class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
        self.conv2 = nn.Conv2d(8, 16, 3, padding=1)
        self.conv3 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv4 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv5 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv6 = nn.Conv2d(128, 256, 2, padding=1)
        self.conv7 = nn.Conv2d(256, 512, 2, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)
        # With 384x384 inputs, the conv/pool stack leaves a 12x12x512 feature map.
        self.fc1 = nn.Linear(12 * 12 * 512, 2048)
        self.fc2 = nn.Linear(2048, 4)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool2(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        x = self.pool2(F.relu(self.conv4(x)))
        x = self.pool2(F.relu(self.conv5(x)))
        x = self.pool2(F.relu(self.conv6(x)))
        x = self.pool2(F.relu(self.conv7(x)))
        x = x.view(-1, 12 * 12 * 512)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

model = Net()
if gpu_status:
    model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0008)
no_epochs = 40
valid_loss_min = np.inf  # np.Inf is a deprecated alias
curr_time = datetime.datetime.now()
curr_timestamp = str(datetime.datetime.now())
for epoch in range(1, no_epochs + 1):
    train_loss = 0.0
    valid_loss = 0.0
    model.train()
    for data, target in leaf_train_loader:
        if gpu_status:
            data = data.cuda()
            target = target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * data.size(0)
    model.eval()
    # (wrapping this loop in torch.no_grad() would save memory)
    for data, target in leaf_valid_loader:
        if gpu_status:
            data = data.cuda()
            target = target.cuda()
        output = model(data)
        loss = criterion(output, target)
        valid_loss += loss.item() * data.size(0)
    # Note: both averages divide by the full dataset size, although each
    # sampler covers only part of it; kept as in the original.
    train_loss = train_loss / len(leaf_train_loader.dataset)
    valid_loss = valid_loss / len(leaf_valid_loader.dataset)
    print(datetime.datetime.now() - curr_time)
    print('Epoch {}: Training Loss : {:.4f} Validation Loss : {:.4f}'.format(epoch, train_loss, valid_loss))
    if valid_loss < valid_loss_min:
        print('Validation loss decreased {:.6f} -> {:.6f}, Saving model...'.format(valid_loss_min, valid_loss))
        torch.save(model.state_dict(), 'Kaggle_kernel_model_apple_leaf' + curr_timestamp + '.pt')
        valid_loss_min = valid_loss
code
32062669/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import torch

gpu_status = torch.cuda.is_available()
if not gpu_status:
    print('No GPU, Using CPU')
else:
    print('Using GPU')
code
32062669/cell_11
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/plant-pathology-2020-fgvc7/train.csv')

def get_label(row):
    for c in train_df.columns[1:]:
        if row[c] == 1:
            return c

train_df_copy = train_df.copy()
train_df_copy['label'] = train_df_copy.apply(get_label, axis=1)
sample_img = train_df.iloc[1, 0]
sample_labels = train_df.iloc[1, :]
sample_labels = np.asarray(sample_labels)
print(len(train_df))
print(train_df.shape[1])
code