path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
90156125/cell_16
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
code
90156125/cell_17
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
previous_match.dropna()
code
90156125/cell_24
[ "text_html_output_1.png" ]
import cudf as pd
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
previous_match.dropna()
previous_match.season.unique()
order = previous_match.city.value_counts().iloc[:10].index
plt.figure(figsize=(30, 10))
order = previous_match.winner.value_counts().iloc[:10].index
sns.countplot(x=previous_match['winner'], palette='rainbow', data=previous_match, order=order)
plt.show()
code
90156125/cell_14
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
code
90156125/cell_22
[ "text_plain_output_1.png" ]
import cudf as pd
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
previous_match.shape
previous_match.isnull().sum()
previous_match.dropna()
previous_match.season.unique()
plt.subplots(figsize=(15, 6))
order = previous_match.city.value_counts().iloc[:10].index
sns.countplot(x=previous_match['city'], data=previous_match, order=order)
plt.show()
code
90156125/cell_10
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
df_train.isnull().sum()
df_train.describe().T
df_train.head()
code
90156125/cell_5
[ "image_output_1.png" ]
import cudf as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Training.csv')
match_2020 = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2020.csv')
previous_match = pd.read_csv('/kaggle/input/ipl-2020-player-performance/Matches IPL 2008-2019.csv')
players = pd.read_csv('../input/ipl-2020-player-performance/IPL 2020 Squads.csv', encoding='windows-1254')
df_train.head()
code
32068954/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.sql import SparkSession import json import os spark = SparkSession.builder.appName('SimpleApp').getOrCreate() sc = spark.sparkContext def findArticlePath(article_sha_id): """ This function finds the full path given an article_sha_id """ ROOT_PATH = '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/' FILE_SUFFIX = '.json' article_path = ROOT_PATH + article_sha_id + FILE_SUFFIX return article_path def retrieveJson(article_sha_id): """ Given a 1-word string containing a JSON key, return the data for those keys. Also return the location of those keys? """ article_file_path = findArticlePath(article_sha_id) with open(article_file_path, 'r') as read_file: json_string = json.load(read_file) return json_string def retrieveKey(json_input, key_input): """ Uses retrieveTopKey and retrieveSubKey to return key values from anywhere in the JSON data string. """ result = [] istopitem = False if not isinstance(json_input, list): json_string = json_input json_input = [] json_input.append(json_string) istopitem = True if istopitem: for key in json_string: if key == key_input: top_key_value = json_string[key] result.append(top_key_value) return result for json_string in json_input: if isinstance(json_string, dict): for key in json_string: top_key_value = json_string[key] if isinstance(top_key_value, dict): for sub_key in top_key_value: sub_key_value = top_key_value[sub_key] if sub_key == key_input: result.append(sub_key_value) else: sub_result = retrieveKey(sub_key_value, key_input) if len(sub_result) != 0: result.append(sub_result) elif isinstance(top_key_value, list): for top_key_value_item in top_key_value: if isinstance(top_key_value_item, dict): for sub_key in top_key_value_item: sub_key_value = top_key_value_item[sub_key] if sub_key == key_input: result.append(sub_key_value) else: sub_result = retrieveKey(sub_key_value, key_input) if len(sub_result) != 0: result.append(sub_result) else: result.append(top_key_value_item) return result def searchWordInTitle(word): article_list = [] for files in os.listdir(path='/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/'): article_sha_id = files.split('.')[0] article_json = retrieveJson(article_sha_id) title = retrieveKey(article_json, 'title') if isinstance(title, list): for item in title: if isinstance(item, list): for it in item: find_result = it.find(word) if find_result >= 0: if not article_sha_id in article_list: article_list.append(article_sha_id) else: find_result = item.find(word) if find_result >= 0: if not article_sha_id in article_list: article_list.append(article_sha_id) else: find_words = title.find(word) if find_words >= 0: if not article_sha_id in article_list: article_list.append(article_sha_id) return article_list def abstract(data): """ Abstract """ data_item = data[0] row_entries = data_item[0] for row in row_entries: cite_span = row[0] ref_span = row[1] section = row[2] text = row[3] return (cite_span, ref_span, section, text) def back_matter(data): """ Back Matter """ data_item = data[0] row_entries = data_item[0] for row in row_entries: cite_span = row[0] ref_span = row[1] section = row[2] text = row[3] return (cite_span, ref_span, section, text) def bib_entries(data): """ Bib Entries """ data_item = data[0] row_entries = data_item[0] for row in row_entries: cite_span = row[0] ref_span = row[1] section = row[2] text = row[3] return (cite_span, ref_span, section, text) def body_text(data): """ Body Text """ data_item = data[0] row_entries = data_item[0] for row in row_entries: 
cite_span = row[0] ref_span = row[1] section = row[2] text = row[3] return (cite_span, ref_span, section, text) def metadata(data): """ Metadata """ data_item = data[0] row_entries = data_item[0] num_entries = len(row_entries) i = 0 for row in row_entries: if i < num_entries - 1: rowitems = row_entries[i][0] affiliation = rowitems[0] institution = affiliation[0] laboratory = affiliation[1] location = affiliation[2] email = rowitems[1] first = rowitems[2] last = rowitems[3] middle = rowitems[4] suffix = rowitems[5] else: title = row i += 1 return (affiliation, institution, laboratory, location, email, first, last, middle, suffix) def paper_id(data): """ Paper Id """ data_item = data[0] paper_id = data_item[0] return paper_id def ref_entries(data): """ Ref Entries """ data_item = data[0] row_entries = data_item[0] for row in row_entries: latex = row[0] text = row[1] type_data = row[2] return (latex, text, type_data) column_functions = {0: abstract, 1: back_matter, 2: bib_entries, 3: body_text, 4: metadata, 5: paper_id, 6: ref_entries} def returnTextualReferences(article_list): article_list = [] for files in os.listdir(path='/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/'): article_sha_id = files.split('.')[0] article_path = retrieveArticlePath(article_sha_id) article_df = spark.read.json(article_path) article_df.createOrReplaceTempView('article') column_results = [] column_names = [] for column_name in df.schema.names: strSQL = 'SELECT ' + column_name + ' from article' column_result = spark.sql(strSQL) column_results.append(column_result) column_names.append(column_name) i = 0 for column_result in column_results: column_name = column_names[i] column_schema = column_result.schema column_rdd = column_result.rdd result = column_result.collect() func = column_functions.get(i) item_result = func(result) i += 1 vaccine_in_title = searchWordInTitle('vaccine') returnTextualReferences(vaccine_in_title)
code
32068954/cell_2
[ "text_plain_output_1.png" ]
!pip install pyspark
code
104129811/cell_9
[ "text_html_output_1.png" ]
import pandas as pd import plotly.graph_objects as go train = pd.read_csv('../input/standup-targets/train.csv') y0 = train.score.values fig = go.Figure() fig.add_trace(go.Box(y=y0, name='Train', marker_color='#1e90ff')) fig.update_layout(title_text='Score stats (individual shot)') fig.update_yaxes() fig.update_xaxes(showticklabels=False) fig.show()
code
104129811/cell_4
[ "text_html_output_2.png" ]
import pandas as pd train = pd.read_csv('../input/standup-targets/train.csv') train.head()
code
104129811/cell_7
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/standup-targets/train.csv') train
code
104129811/cell_8
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/standup-targets/train.csv') print(f'Number of bullet impacts : {train.shape[0]}') print(f'Average number of impacts per target : {train.shape[0] / len(train.image_name.unique())}')
code
104129811/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd import plotly.graph_objects as go train = pd.read_csv('../input/standup-targets/train.csv') y0 = train.score.values fig = go.Figure() fig.add_trace(go.Box(y=y0, name='Train', marker_color='#1e90ff')) fig.update_layout(title_text='Score stats (individual shot)') fig.update_yaxes() fig.update_xaxes(showticklabels=False) colors = ['#1e90ff'] * 7 colors[0] = '#ff6347' dict_ = dict(train.score.value_counts()) x = list(dict_.keys()) y = list(dict_.values()) fig = go.Figure(data=[go.Bar(x=x, y=y, marker_color=colors)]) fig.update_layout(title_text='Score distribution (individual shot)') y0 = list(train.groupby(['image_name'])[['score']].mean().values.ravel()) fig = go.Figure() fig.add_trace(go.Box(y=y0, name='Train', marker_color='#1e90ff')) fig.update_layout(title_text='Score stats (per series)') fig.update_yaxes() fig.update_xaxes(showticklabels=False) df_to_merge_on = pd.DataFrame(train.image_name.value_counts()) train = train.merge(df_to_merge_on, right_on=train.image_name.value_counts().index, left_on='image_name').drop(['image_name_x'], axis=1) train = train.rename(columns={'image_name_y': 'nb_bullets'}) y0 = list(train.loc[train['nb_bullets'] == 1, :].groupby(['image_name'])[['score']].mean().values.ravel()) y1 = list(train.loc[train['nb_bullets'] == 2, :].groupby(['image_name'])[['score']].mean().values.ravel()) y2 = list(train.loc[train['nb_bullets'] == 3, :].groupby(['image_name'])[['score']].mean().values.ravel()) y3 = list(train.loc[train['nb_bullets'] == 4, :].groupby(['image_name'])[['score']].mean().values.ravel()) y4 = list(train.loc[train['nb_bullets'] == 5, :].groupby(['image_name'])[['score']].mean().values.ravel()) y5 = list(train.loc[train['nb_bullets'] == 6, :].groupby(['image_name'])[['score']].mean().values.ravel()) fig = go.Figure() fig.add_trace(go.Box(y=y0, name='1 bullet series')) fig.add_trace(go.Box(y=y1, name='2 bullet series', marker_color='blue')) fig.add_trace(go.Box(y=y2, name='3 bullet series', marker_color='blue')) fig.add_trace(go.Box(y=y3, name='4 bullet series', marker_color='blue')) fig.add_trace(go.Box(y=y4, name='5 bullet series', marker_color='blue')) fig.add_trace(go.Box(y=y5, name='6 bullet series', marker_color='blue')) fig.update_layout(legend_title_text='Discourse effectiveness', title_text='Mean size of sentences in discourse (logscale)') fig.update_xaxes(showticklabels=False) fig.show()
code
104129811/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import plotly.graph_objects as go train = pd.read_csv('../input/standup-targets/train.csv') y0 = train.score.values fig = go.Figure() fig.add_trace(go.Box(y=y0, name='Train', marker_color='#1e90ff')) fig.update_layout(title_text='Score stats (individual shot)') fig.update_yaxes() fig.update_xaxes(showticklabels=False) colors = ['#1e90ff'] * 7 colors[0] = '#ff6347' dict_ = dict(train.score.value_counts()) x = list(dict_.keys()) y = list(dict_.values()) fig = go.Figure(data=[go.Bar(x=x, y=y, marker_color=colors)]) fig.update_layout(title_text='Score distribution (individual shot)')
code
104129811/cell_12
[ "text_html_output_1.png" ]
import pandas as pd import plotly.graph_objects as go train = pd.read_csv('../input/standup-targets/train.csv') y0 = train.score.values fig = go.Figure() fig.add_trace(go.Box(y=y0, name='Train', marker_color='#1e90ff')) fig.update_layout(title_text='Score stats (individual shot)') fig.update_yaxes() fig.update_xaxes(showticklabels=False) colors = ['#1e90ff'] * 7 colors[0] = '#ff6347' dict_ = dict(train.score.value_counts()) x = list(dict_.keys()) y = list(dict_.values()) fig = go.Figure(data=[go.Bar(x=x, y=y, marker_color=colors)]) fig.update_layout(title_text='Score distribution (individual shot)') y0 = list(train.groupby(['image_name'])[['score']].mean().values.ravel()) fig = go.Figure() fig.add_trace(go.Box(y=y0, name='Train', marker_color='#1e90ff')) fig.update_layout(title_text='Score stats (per series)') fig.update_yaxes() fig.update_xaxes(showticklabels=False) fig.show()
code
74052264/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/covidqa/community.csv') df.head()
code
74052264/cell_6
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer import numpy as np import pandas as pd df = pd.read_csv('../input/covidqa/community.csv') vectorizer = TfidfVectorizer() vectorizer.fit(np.concatenate((df.question, df.answer)))
code
74052264/cell_11
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity import numpy as np import pandas as pd df = pd.read_csv('../input/covidqa/community.csv') vectorizer = TfidfVectorizer() vectorizer.fit(np.concatenate((df.question, df.answer))) Question_vectors = vectorizer.transform(df.question) name = input('Enter your name : ') print(f'BOT : Hello {name}! How can I help you ?') while True: input_question = input(f'{name} : ') if input_question.lower() == 'bye': print(f'BOT : Bye! Have a nice day and maintain proper norms and regulations to stop the spread of COVID.') break input_question_vector = vectorizer.transform([input_question]) similarities = cosine_similarity(input_question_vector, Question_vectors) closest = np.argmax(similarities, axis=1) print(f'BOT : {df.answer.iloc[closest].values[0]}')
code
2039183/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import Phrases from gensim.models import word2vec from gensim.models.phrases import Phraser from nltk.corpus import stopwords import logging import pandas as pd import pickle import pickle import re import pandas as pd import re from nltk.corpus import stopwords from gensim.models import word2vec import pickle import nltk.data import os tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') path = '../input/' TRAIN_DATA_FILE = f'{path}train.csv' TEST_DATA_FILE = f'{path}test.csv' train = pd.read_csv(TRAIN_DATA_FILE, header=0) test = pd.read_csv(TEST_DATA_FILE, header=0) all_comments = train['comment_text'].fillna('_na_').tolist() + test['comment_text'].fillna('_na_').tolist() with open('all_comments.csv', 'w+') as comments_file: i = 0 for comment in all_comments: comment = re.sub('[^a-zA-Z]', ' ', str(comment)) comments_file.write('%s\n' % comment) class FileToComments(object): def __init__(self, filename): self.filename = filename self.stop = set(nltk.corpus.stopwords.words('english')) def __iter__(self): def comment_to_wordlist(comment, remove_stopwords=True): comment = str(comment) words = comment.lower().split() return words for line in open(self.filename, 'r'): tokenized_comment = comment_to_wordlist(line, tokenizer) yield tokenized_comment all_comments = FileToComments('all_comments.csv') from gensim.models import Phrases from gensim.models.phrases import Phraser bigram = Phrases(all_comments, min_count=30, threshold=15) bigram_phraser = Phraser(bigram) all_tokens = [bigram_phraser[comment] for comment in all_comments] stops = set(stopwords.words('english')) clean_all_tokens = [] for token in all_tokens: words = [w for w in token if not w in stops] clean_all_tokens += [words] import pickle with open('tokenized_all_comments.pickle', 'wb') as filename: pickle.dump(clean_all_tokens, filename, protocol=pickle.HIGHEST_PROTOCOL) with open('data/tokenized_comments/tokenized_all_comments.pickle', 'rb') as filename: all_comments = pickle.load(filename) import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) num_features = 300 min_word_count = 20 num_workers = 16 context = 10 downsampling = 0.001 print('Training model...') model = word2vec.Word2Vec(all_comments, workers=num_workers, size=num_features, min_count=min_word_count, window=context, sample=downsampling) model.init_sims(replace=True) model_name = 'models/%sfeatures_%sminwords_%scontext' % (num_features, min_word_count, context) model.save(model_name)
code
2039183/cell_6
[ "text_plain_output_1.png" ]
from gensim.models import Phrases from gensim.models.phrases import Phraser from nltk.corpus import stopwords import pandas as pd import re import pandas as pd import re from nltk.corpus import stopwords from gensim.models import word2vec import pickle import nltk.data import os tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') path = '../input/' TRAIN_DATA_FILE = f'{path}train.csv' TEST_DATA_FILE = f'{path}test.csv' train = pd.read_csv(TRAIN_DATA_FILE, header=0) test = pd.read_csv(TEST_DATA_FILE, header=0) all_comments = train['comment_text'].fillna('_na_').tolist() + test['comment_text'].fillna('_na_').tolist() with open('all_comments.csv', 'w+') as comments_file: i = 0 for comment in all_comments: comment = re.sub('[^a-zA-Z]', ' ', str(comment)) comments_file.write('%s\n' % comment) class FileToComments(object): def __init__(self, filename): self.filename = filename self.stop = set(nltk.corpus.stopwords.words('english')) def __iter__(self): def comment_to_wordlist(comment, remove_stopwords=True): comment = str(comment) words = comment.lower().split() return words for line in open(self.filename, 'r'): tokenized_comment = comment_to_wordlist(line, tokenizer) yield tokenized_comment all_comments = FileToComments('all_comments.csv') from gensim.models import Phrases from gensim.models.phrases import Phraser bigram = Phrases(all_comments, min_count=30, threshold=15) bigram_phraser = Phraser(bigram) all_tokens = [bigram_phraser[comment] for comment in all_comments] stops = set(stopwords.words('english')) clean_all_tokens = [] for token in all_tokens: words = [w for w in token if not w in stops] clean_all_tokens += [words] print('tokens cleaned')
code
2039183/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd import re path = '../input/' TRAIN_DATA_FILE = f'{path}train.csv' TEST_DATA_FILE = f'{path}test.csv' train = pd.read_csv(TRAIN_DATA_FILE, header=0) test = pd.read_csv(TEST_DATA_FILE, header=0) print('Read %d labeled train reviews and %d unlabelled test reviews' % (len(train), len(test))) all_comments = train['comment_text'].fillna('_na_').tolist() + test['comment_text'].fillna('_na_').tolist() with open('all_comments.csv', 'w+') as comments_file: i = 0 for comment in all_comments: comment = re.sub('[^a-zA-Z]', ' ', str(comment)) comments_file.write('%s\n' % comment)
code
2039183/cell_7
[ "text_plain_output_1.png" ]
from gensim.models import Phrases from gensim.models.phrases import Phraser from nltk.corpus import stopwords import pandas as pd import pickle import pickle import re import pandas as pd import re from nltk.corpus import stopwords from gensim.models import word2vec import pickle import nltk.data import os tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') path = '../input/' TRAIN_DATA_FILE = f'{path}train.csv' TEST_DATA_FILE = f'{path}test.csv' train = pd.read_csv(TRAIN_DATA_FILE, header=0) test = pd.read_csv(TEST_DATA_FILE, header=0) all_comments = train['comment_text'].fillna('_na_').tolist() + test['comment_text'].fillna('_na_').tolist() with open('all_comments.csv', 'w+') as comments_file: i = 0 for comment in all_comments: comment = re.sub('[^a-zA-Z]', ' ', str(comment)) comments_file.write('%s\n' % comment) class FileToComments(object): def __init__(self, filename): self.filename = filename self.stop = set(nltk.corpus.stopwords.words('english')) def __iter__(self): def comment_to_wordlist(comment, remove_stopwords=True): comment = str(comment) words = comment.lower().split() return words for line in open(self.filename, 'r'): tokenized_comment = comment_to_wordlist(line, tokenizer) yield tokenized_comment all_comments = FileToComments('all_comments.csv') from gensim.models import Phrases from gensim.models.phrases import Phraser bigram = Phrases(all_comments, min_count=30, threshold=15) bigram_phraser = Phraser(bigram) all_tokens = [bigram_phraser[comment] for comment in all_comments] stops = set(stopwords.words('english')) clean_all_tokens = [] for token in all_tokens: words = [w for w in token if not w in stops] clean_all_tokens += [words] import pickle with open('tokenized_all_comments.pickle', 'wb') as filename: pickle.dump(clean_all_tokens, filename, protocol=pickle.HIGHEST_PROTOCOL) print('files saved to tokenized_all_comments.pickle...')
code
122259592/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd sample_sub = pd.read_csv('sample_submission.csv') test_data = pd.read_csv('test_dataset.csv', index_col=0) train_data = pd.read_csv('train_dataset.csv', index_col=0) sample_sub.head()
code
48162572/cell_9
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') submission.head()
code
48162572/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape ALL.columns
code
48162572/cell_23
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape
code
48162572/cell_30
[ "image_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape ALL.head()
code
48162572/cell_44
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in ALL: if x in ALL.columns[-ADD_CNT:]: ALL[x] = standardization(ALL[x]) ALL = ALL.drop(['sig_id'], axis=1) from sklearn.decomposition import PCA pca = PCA(150) ALL = pca.fit_transform(ALL) Columns = [] for i in range(150): Columns.append('d' + str(i + 1)) ALL = pd.DataFrame(ALL, columns=Columns) categorical_features = Columns ALL.head()
code
48162572/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') train_features.head()
code
48162572/cell_39
[ "text_html_output_1.png" ]
import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in ALL: if x in ALL.columns[-ADD_CNT:]: ALL[x] = standardization(ALL[x]) ALL.describe()
code
48162572/cell_41
[ "image_output_1.png" ]
import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in ALL: if x in ALL.columns[-ADD_CNT:]: ALL[x] = standardization(ALL[x]) ALL = ALL.drop(['sig_id'], axis=1) ALL.head()
code
48162572/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') plt.figure(figsize=(8, 6)) plt.subplot(2, 2, 1) plt.hist(test_features['g-0']) plt.subplot(2, 2, 2) plt.hist(test_features['c-0']) plt.subplot(2, 2, 3) plt.hist(test_features['g-178']) plt.subplot(2, 2, 4) plt.hist(test_features['c-32']) plt.show()
code
48162572/cell_52
[ "text_html_output_1.png" ]
x_train.head()
code
48162572/cell_7
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') train_targets.head()
code
48162572/cell_45
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in ALL: if x in ALL.columns[-ADD_CNT:]: ALL[x] = standardization(ALL[x]) ALL = ALL.drop(['sig_id'], axis=1) from sklearn.decomposition import PCA pca = PCA(150) ALL = pca.fit_transform(ALL) Columns = [] for i in range(150): Columns.append('d' + str(i + 1)) ALL = pd.DataFrame(ALL, columns=Columns) categorical_features = Columns pca.explained_variance_ratio_
code
48162572/cell_49
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in ALL: if x in ALL.columns[-ADD_CNT:]: ALL[x] = standardization(ALL[x]) ALL = ALL.drop(['sig_id'], axis=1) from sklearn.decomposition import PCA pca = PCA(150) ALL = pca.fit_transform(ALL) Columns = [] for i in range(150): Columns.append('d' + str(i + 1)) ALL = pd.DataFrame(ALL, columns=Columns) categorical_features = Columns ALL.head()
code
48162572/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') plt.figure(figsize=(8, 6)) plt.subplot(2, 2, 1) plt.hist(train_features['g-0']) plt.subplot(2, 2, 2) plt.hist(train_features['c-0']) plt.subplot(2, 2, 3) plt.hist(train_features['g-178']) plt.subplot(2, 2, 4) plt.hist(train_features['c-32']) plt.show()
code
48162572/cell_8
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') test_features.head()
code
48162572/cell_15
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') test_features.describe()
code
48162572/cell_31
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape plt.figure(figsize=(8, 6)) plt.subplot(2, 2, 1) plt.hist(ALL['g-0']) plt.subplot(2, 2, 2) plt.hist(ALL['c-0']) plt.subplot(2, 2, 3) plt.hist(ALL['g-178']) plt.subplot(2, 2, 4) plt.hist(ALL['c-32']) plt.show()
code
48162572/cell_46
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 for x in ALL: if x in ALL.columns[-ADD_CNT:]: ALL[x] = standardization(ALL[x]) ALL = ALL.drop(['sig_id'], axis=1) from sklearn.decomposition import PCA pca = PCA(150) ALL = pca.fit_transform(ALL) Columns = [] for i in range(150): Columns.append('d' + str(i + 1)) ALL = pd.DataFrame(ALL, columns=Columns) categorical_features = Columns pca.explained_variance_ratio_ pca.explained_variance_ratio_.cumsum()
code
48162572/cell_24
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape ALL.head()
code
48162572/cell_14
[ "text_html_output_1.png" ]
import pandas as pd train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') train_features.describe()
code
48162572/cell_37
[ "text_html_output_1.png" ]
import pandas as pd import statistics train_features = pd.read_csv('/kaggle/input/lish-moa/train_features.csv') train_targets = pd.read_csv('/kaggle/input/lish-moa/train_targets_scored.csv') test_features = pd.read_csv('/kaggle/input/lish-moa/test_features.csv') submission = pd.read_csv('/kaggle/input/lish-moa/sample_submission.csv') ALL = pd.concat([train_features, test_features]) ALL.shape def standardization(l): l_mean = statistics.mean(l) l_stdev = statistics.stdev(l) ret = [] for x in l: y = (x - l_mean) / l_stdev ret.append(y) return ret ALL.columns GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] ADD_CNT = 12 for x in GENES: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1 print('OK') for x in CELLS: ALL[x + 'High'] = (ALL[x] ** 2) ** 0.5 MAX = ALL[x + 'High'].max() for y in ALL[x + 'High']: if y <= MAX / 2: y = 0 else: y = y - MAX / 2 ALL[x + 'High'] = ALL[x + 'High'] * (abs(ALL[x]) / ALL[x]) ADD_CNT += 1
code
88092902/cell_9
[ "text_html_output_4.png", "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from cuml.svm import SVR import matplotlib.pyplot as plt import pandas as pd import random data_types_dict = {'time_id': 'int16', 'investment_id': 'int16', 'target': 'float32'} features = [f'f_{i}' for i in range(300)] for f in features: data_types_dict[f] = 'float32' train = pd.read_csv('../input/ubiquant-market-prediction/train.csv', usecols=data_types_dict.keys(), dtype=data_types_dict) N_DEVIDE_DATA = 20 n_row = len(train) idx_list = list(range(n_row)) random.shuffle(idx_list) models = [] for i in range(N_DEVIDE_DATA - 1): start_idx = int(n_row / N_DEVIDE_DATA) * i end_idx = int(n_row / N_DEVIDE_DATA) * (i + 1) devide_idx_list = idx_list[start_idx:end_idx] tr = train.iloc[devide_idx_list] X = tr[features].to_numpy() y = tr['target'].to_numpy() model = SVR(C=5.0, kernel='rbf', epsilon=0.1) model.fit(X, y) r2 = model.score(X, y) models.append(model) start_idx = int(n_row / N_DEVIDE_DATA) * 19 devide_idx_list = idx_list[start_idx:] test = train.iloc[devide_idx_list] X_test = test[features].to_numpy() y_test = test['target'].to_numpy() pre_y = 0 for model in models: pre_y += model.predict(X_test) pre_y /= len(models) plt.scatter(y_test, pre_y)
code
88092902/cell_6
[ "text_plain_output_1.png" ]
from cuml.svm import SVR import pandas as pd import random data_types_dict = {'time_id': 'int16', 'investment_id': 'int16', 'target': 'float32'} features = [f'f_{i}' for i in range(300)] for f in features: data_types_dict[f] = 'float32' train = pd.read_csv('../input/ubiquant-market-prediction/train.csv', usecols=data_types_dict.keys(), dtype=data_types_dict) N_DEVIDE_DATA = 20 n_row = len(train) idx_list = list(range(n_row)) random.shuffle(idx_list) models = [] for i in range(N_DEVIDE_DATA - 1): start_idx = int(n_row / N_DEVIDE_DATA) * i end_idx = int(n_row / N_DEVIDE_DATA) * (i + 1) devide_idx_list = idx_list[start_idx:end_idx] tr = train.iloc[devide_idx_list] X = tr[features].to_numpy() y = tr['target'].to_numpy() model = SVR(C=5.0, kernel='rbf', epsilon=0.1) model.fit(X, y) r2 = model.score(X, y) print(i, 'R^2:', r2) models.append(model)
code
88092902/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from cuml.svm import SVR import pandas as pd import pickle import random import ubiquant data_types_dict = {'time_id': 'int16', 'investment_id': 'int16', 'target': 'float32'} features = [f'f_{i}' for i in range(300)] for f in features: data_types_dict[f] = 'float32' train = pd.read_csv('../input/ubiquant-market-prediction/train.csv', usecols=data_types_dict.keys(), dtype=data_types_dict) N_DEVIDE_DATA = 20 n_row = len(train) idx_list = list(range(n_row)) random.shuffle(idx_list) models = [] for i in range(N_DEVIDE_DATA - 1): start_idx = int(n_row / N_DEVIDE_DATA) * i end_idx = int(n_row / N_DEVIDE_DATA) * (i + 1) devide_idx_list = idx_list[start_idx:end_idx] tr = train.iloc[devide_idx_list] X = tr[features].to_numpy() y = tr['target'].to_numpy() model = SVR(C=5.0, kernel='rbf', epsilon=0.1) model.fit(X, y) r2 = model.score(X, y) models.append(model) start_idx = int(n_row / N_DEVIDE_DATA) * 19 devide_idx_list = idx_list[start_idx:] test = train.iloc[devide_idx_list] X_test = test[features].to_numpy() y_test = test['target'].to_numpy() pre_y = 0 for model in models: pre_y += model.predict(X_test) pre_y /= len(models) for i, model in enumerate(models): filename = 'model_svr_{}.sav'.format(i) pickle.dump(model, open(filename, 'wb')) models = [] for i in range(19): filename = 'model_svr_{}.sav'.format(i) loaded_model = pickle.load(open(filename, 'rb')) models.append(loaded_model) import ubiquant env = ubiquant.make_env() iter_test = env.iter_test() for test_df, sample_prediction_df in iter_test: test_x = test_df[features].to_numpy() for loaded_model in models: sample_prediction_df['target'] += loaded_model.predict(test_x) sample_prediction_df['target'] /= len(models) + 1 env.predict(sample_prediction_df) display(sample_prediction_df)
code
88092902/cell_10
[ "text_plain_output_1.png" ]
from cuml.svm import SVR import numpy as np import pandas as pd import random data_types_dict = {'time_id': 'int16', 'investment_id': 'int16', 'target': 'float32'} features = [f'f_{i}' for i in range(300)] for f in features: data_types_dict[f] = 'float32' train = pd.read_csv('../input/ubiquant-market-prediction/train.csv', usecols=data_types_dict.keys(), dtype=data_types_dict) N_DEVIDE_DATA = 20 n_row = len(train) idx_list = list(range(n_row)) random.shuffle(idx_list) models = [] for i in range(N_DEVIDE_DATA - 1): start_idx = int(n_row / N_DEVIDE_DATA) * i end_idx = int(n_row / N_DEVIDE_DATA) * (i + 1) devide_idx_list = idx_list[start_idx:end_idx] tr = train.iloc[devide_idx_list] X = tr[features].to_numpy() y = tr['target'].to_numpy() model = SVR(C=5.0, kernel='rbf', epsilon=0.1) model.fit(X, y) r2 = model.score(X, y) models.append(model) start_idx = int(n_row / N_DEVIDE_DATA) * 19 devide_idx_list = idx_list[start_idx:] test = train.iloc[devide_idx_list] X_test = test[features].to_numpy() y_test = test['target'].to_numpy() pre_y = 0 for model in models: pre_y += model.predict(X_test) pre_y /= len(models) np.corrcoef(y_test.tolist(), pre_y.tolist())
code
128005771/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df = pd.read_csv('diabetes_prediction_dataset.csv') df.head()
code
32064989/cell_21
[ "text_html_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
sum(model['errors0'])
code
32064989/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt #for visualizing
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
plt.scatter(x='YearsExperience', y='Salary', data=Salary)
code
32064989/cell_25
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
sum(model['errors2'])
code
32064989/cell_34
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression lr_model = LinearRegression() lr_model.fit(x_train, y_train) lr_model.intercept_
code
32064989/cell_23
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
sum(model['errors1'])
code
32064989/cell_30
[ "text_plain_output_1.png" ]
print(x_train.shape, y_train.shape)
code
32064989/cell_33
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression lr_model = LinearRegression() lr_model.fit(x_train, y_train)
code
32064989/cell_20
[ "text_html_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
model
code
32064989/cell_40
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt #for visualizing
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
fig,ax = plt.subplots()
ax.scatter(x='YearsExperience',y='Salary',data=Salary)
ax.add_line(plt.Line2D(model['YearsExperience'],model.predicted0,color='red'))
ax.add_line(plt.Line2D(model['YearsExperience'],model.predicted1,color='Black'))
ax.add_line(plt.Line2D(model['YearsExperience'],model.predicted2,color='Green'))
x = Salary.loc[:, ['YearsExperience']]
y = Salary.loc[:, ['Salary']]
fig, ax = plt.subplots()
ax.scatter(x='YearsExperience', y='Salary', data=Salary)
ax.add_line(plt.Line2D(model['YearsExperience'], model.predicted3, color='red'))
ax.add_line(plt.Line2D(model['YearsExperience'], model.predicted0, color='Green'))
ax.add_line(plt.Line2D(model['YearsExperience'], model.predicted2, color='Black'))
code
32064989/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt #for visualizing
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
fig,ax = plt.subplots()
ax.scatter(x='YearsExperience',y='Salary',data=Salary)
ax.add_line(plt.Line2D(model['YearsExperience'],model.predicted0,color='red'))
ax.add_line(plt.Line2D(model['YearsExperience'],model.predicted1,color='Black'))
ax.add_line(plt.Line2D(model['YearsExperience'],model.predicted2,color='Green'))
model
code
32064989/cell_26
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
sum(model['errors2'] ** 2)
code
32064989/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32064989/cell_7
[ "text_html_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
Salary.info()
Salary.describe()
code
32064989/cell_32
[ "text_plain_output_1.png" ]
print(x_test.shape, y_test.shape)
code
32064989/cell_15
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
model
code
32064989/cell_35
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression lr_model = LinearRegression() lr_model.fit(x_train, y_train) lr_model.intercept_ lr_model.coef_
code
32064989/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
x_train
code
32064989/cell_24
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
sum(model['errors1'] ** 2)
code
32064989/cell_22
[ "text_html_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
sum(model['errors0'] ** 2)
code
32064989/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt #for visualizing
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
fig, ax = plt.subplots()
ax.scatter(x='YearsExperience', y='Salary', data=Salary)
ax.add_line(plt.Line2D(model['YearsExperience'], model.predicted0, color='red'))
ax.add_line(plt.Line2D(model['YearsExperience'], model.predicted1, color='Black'))
ax.add_line(plt.Line2D(model['YearsExperience'], model.predicted2, color='Green'))
code
32064989/cell_37
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression lr_model = LinearRegression() lr_model.fit(x_train, y_train) lr_model.intercept_ lr_model.coef_ lr_model.score(x_train, y_train) lr_model.score(x_test, y_test)
code
32064989/cell_12
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.options.display.float_format = '{:20,.2f}'.format
import os
Salary = pd.read_csv('../input/salary/Salary.csv')
model = pd.read_csv('../input/salary/Salary.csv')
model
code
32064989/cell_36
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression lr_model = LinearRegression() lr_model.fit(x_train, y_train) lr_model.intercept_ lr_model.coef_ lr_model.score(x_train, y_train)
code
130025335/cell_25
[ "image_output_1.png" ]
from tensorflow.keras.utils import load_img
import cv2
import hashlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import random
def load_image(filename):
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename)
def load_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample)
def load_augmented_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('/kaggle/working/' + sample)
def load_image_for_augmentation(image_path):
    image = cv2.imread(image_path)
    if image.shape[-1] == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    image = np.array(image)
    return image
def format_path_gcs(st):
    return GCS_DS_PATH + '/train_images/' + st
def format_resized_image_path_gcs(st):
    return GCS_DS_PATH + '/img_sz_384/' + st
def format_tpu_path(st):
    return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st
# Defining mem usage reduction function to try to make dataframes lighter
def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/'
train_df = reduce_mem_usage(train_df)
RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/'
train_df.labels.value_counts()
label_counts = train_df['labels'].value_counts()
colormap = plt.cm.get_cmap('tab10')
num_labels = len(label_counts)
colors = [colormap(i) for i in range(num_labels)]
plt.xticks(rotation=45)
for i, count in enumerate(label_counts):
    plt.text(i, count + 50, str(count), ha='center')
total_count = label_counts.sum()
plt.text(0.5, 1.08, f'Total Count: {total_count}', transform=plt.gca().transAxes, ha='center', fontsize=12, fontweight='bold')
max_count = max(label_counts)
y_limit = max_count + max_count * 0.1
plt.ylim(top=y_limit)
initial_length = len(train_df)
hashes = {}
duplicates = []
originals = []
for index, row in train_df.iterrows():
    filename = row['image']
    with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f:
        hash = hashlib.md5(f.read()).hexdigest()
    if hash in hashes:
        duplicates.append(filename)
        originals.append(hashes[hash])
        train_df.drop(index, inplace=True)
    else:
        hashes[hash] = filename
# List of image filenames
image_filenames = [originals[0], duplicates[0], originals[1], duplicates[1], originals[2], duplicates[2]]
# Create a figure with a 3x2 grid of subplots
fig, axes = plt.subplots(3, 2, figsize=(10, 8))
# Iterate over the image filenames and corresponding subplots
for i, (image_filename, ax) in enumerate(zip(image_filenames, axes.flatten())):
    # Load the image
    image = plt.imread(format_tpu_path(image_filename))
    # Show the image in the subplot
    ax.imshow(image)
    ax.axis('off')
    # Set subplot title
    if i % 2 == 0:
        title = 'Original'
    else:
        title = 'Duplicate'
    ax.set_title(title)
# Adjust the layout of subplots to avoid overlapping
plt.tight_layout()
# Display the figure
plt.show()
train_df.labels.value_counts()
label_counts = train_df['labels'].value_counts()
colormap = plt.cm.get_cmap('tab10')
num_labels = len(label_counts)
colors = [colormap(i) for i in range(num_labels)]
plt.figure(figsize=(10, 6))
plt.bar(label_counts.index, label_counts.values, color=colors)
plt.xlabel('Labels')
plt.ylabel('Count')
plt.title('Label Distribution')
plt.xticks(rotation=45)
for i, count in enumerate(label_counts):
    plt.text(i, count + 50, str(count), ha='center')
total_count = label_counts.sum()
plt.axhline(total_count, color='black', linestyle='--', alpha=0.5)
plt.text(0.5, 1.08, f'Total Count: {total_count}', transform=plt.gca().transAxes, ha='center', fontsize=12, fontweight='bold')
max_count = max(label_counts)
y_limit = max_count + max_count * 0.1
plt.ylim(top=y_limit)
plt.show()
code
130025335/cell_30
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.utils import load_img
import cv2
import hashlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import random
def load_image(filename):
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename)
def load_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample)
def load_augmented_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('/kaggle/working/' + sample)
def load_image_for_augmentation(image_path):
    image = cv2.imread(image_path)
    if image.shape[-1] == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    image = np.array(image)
    return image
def format_path_gcs(st):
    return GCS_DS_PATH + '/train_images/' + st
def format_resized_image_path_gcs(st):
    return GCS_DS_PATH + '/img_sz_384/' + st
def format_tpu_path(st):
    return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st
# Defining mem usage reduction function to try to make dataframes lighter
def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/'
train_df = reduce_mem_usage(train_df)
RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/'
train_df.labels.value_counts()
initial_length = len(train_df)
hashes = {}
duplicates = []
originals = []
for index, row in train_df.iterrows():
    filename = row['image']
    with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f:
        hash = hashlib.md5(f.read()).hexdigest()
    if hash in hashes:
        duplicates.append(filename)
        originals.append(hashes[hash])
        train_df.drop(index, inplace=True)
    else:
        hashes[hash] = filename
train_df.labels.value_counts()
labels = train_df['labels'].tolist()
unique_labels = set()
for label in labels:
    unique_labels.update(label.split())
common_labels = [label[0] for label in pd.Series(labels).str.split(expand=True).stack().value_counts()[:6].items()]
mlb = MultiLabelBinarizer(classes=common_labels)
label_matrix = mlb.fit_transform(train_df['labels'].str.split())
label_df = pd.DataFrame(label_matrix, columns=common_labels)
train_df.reset_index(drop=True, inplace=True)
label_df.reset_index(drop=True, inplace=True)
new_df = pd.concat([train_df, label_df], axis=1)
train_df = new_df.drop('labels', axis=1)
train_df.head()
code
130025335/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import hashlib import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df) RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/' train_df.labels.value_counts() initial_length = len(train_df) hashes = {} duplicates = [] originals = [] for index, row in train_df.iterrows(): filename = row['image'] with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f: hash = hashlib.md5(f.read()).hexdigest() if hash in hashes: duplicates.append(filename) originals.append(hashes[hash]) train_df.drop(index, inplace=True) else: hashes[hash] = filename print(f'Number of duplicates found: {initial_length - len(hashes)}')
code
130025335/cell_29
[ "image_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import hashlib import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df) RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/' train_df.labels.value_counts() initial_length = len(train_df) hashes = {} duplicates = [] originals = [] for index, row in train_df.iterrows(): filename = row['image'] with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f: hash = hashlib.md5(f.read()).hexdigest() if hash in hashes: duplicates.append(filename) originals.append(hashes[hash]) train_df.drop(index, inplace=True) else: hashes[hash] = filename train_df.labels.value_counts() train_df
code
130025335/cell_11
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import matplotlib.pyplot as plt import numpy as np import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df)
code
130025335/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import random
from tensorflow.keras.utils import load_img
import matplotlib.pyplot as plt
import glob as gb
from kaggle_datasets import KaggleDatasets
!pip install -q efficientnet
import efficientnet.tfkeras as efn
import tensorflow as tf
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications import EfficientNetB7
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, TensorBoard, ModelCheckpoint
from tensorflow.keras.utils import plot_model
from IPython.display import SVG, Image
import cv2
from sklearn.preprocessing import MultiLabelBinarizer
import os
import hashlib
from PIL import Image
import albumentations as A
code
130025335/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import matplotlib.pyplot as plt import numpy as np import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df) train_df.labels.value_counts() label_counts = train_df['labels'].value_counts() colormap = plt.cm.get_cmap('tab10') num_labels = len(label_counts) colors = [colormap(i) for i in range(num_labels)] plt.figure(figsize=(10, 6)) plt.bar(label_counts.index, label_counts.values, color=colors) plt.xlabel('Labels') plt.ylabel('Count') plt.title('Label Distribution') plt.xticks(rotation=45) for i, count in enumerate(label_counts): plt.text(i, count + 50, str(count), ha='center') total_count = label_counts.sum() plt.axhline(total_count, color='black', linestyle='--', alpha=0.5) plt.text(0.5, 1.08, f'Total Count: {total_count}', transform=plt.gca().transAxes, ha='center', fontsize=12, fontweight='bold') max_count = max(label_counts) y_limit = max_count + max_count * 0.1 plt.ylim(top=y_limit) plt.show()
code
130025335/cell_28
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import hashlib import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df) RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/' train_df.labels.value_counts() initial_length = len(train_df) hashes = {} duplicates = [] originals = [] for index, row in train_df.iterrows(): filename = row['image'] with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f: hash = hashlib.md5(f.read()).hexdigest() if hash in hashes: duplicates.append(filename) originals.append(hashes[hash]) train_df.drop(index, inplace=True) else: hashes[hash] = filename train_df.labels.value_counts() labels = train_df['labels'].tolist() unique_labels = set() for label in labels: unique_labels.update(label.split()) print(unique_labels, 'suma:', len(unique_labels))
code
130025335/cell_3
[ "text_plain_output_1.png" ]
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print(gpus)
if len(gpus) == 1:
    strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
else:
    strategy = tf.distribute.MirroredStrategy()
code
130025335/cell_17
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import matplotlib.pyplot as plt import numpy as np import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df) train_df.labels.value_counts()
code
130025335/cell_31
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.utils import load_img
import cv2
import hashlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import random
def load_image(filename):
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename)
def load_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample)
def load_augmented_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('/kaggle/working/' + sample)
def load_image_for_augmentation(image_path):
    image = cv2.imread(image_path)
    if image.shape[-1] == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    image = np.array(image)
    return image
def format_path_gcs(st):
    return GCS_DS_PATH + '/train_images/' + st
def format_resized_image_path_gcs(st):
    return GCS_DS_PATH + '/img_sz_384/' + st
def format_tpu_path(st):
    return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st
# Defining mem usage reduction function to try to make dataframes lighter
def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/'
train_df = reduce_mem_usage(train_df)
RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/'
train_df.labels.value_counts()
initial_length = len(train_df)
hashes = {}
duplicates = []
originals = []
for index, row in train_df.iterrows():
    filename = row['image']
    with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f:
        hash = hashlib.md5(f.read()).hexdigest()
    if hash in hashes:
        duplicates.append(filename)
        originals.append(hashes[hash])
        train_df.drop(index, inplace=True)
    else:
        hashes[hash] = filename
train_df.labels.value_counts()
labels = train_df['labels'].tolist()
unique_labels = set()
for label in labels:
    unique_labels.update(label.split())
common_labels = [label[0] for label in pd.Series(labels).str.split(expand=True).stack().value_counts()[:6].items()]
mlb = MultiLabelBinarizer(classes=common_labels)
label_matrix = mlb.fit_transform(train_df['labels'].str.split())
label_df = pd.DataFrame(label_matrix, columns=common_labels)
train_df.reset_index(drop=True, inplace=True)
label_df.reset_index(drop=True, inplace=True)
new_df = pd.concat([train_df, label_df], axis=1)
train_df = new_df.drop('labels', axis=1)
train_df
code
130025335/cell_24
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import load_img import cv2 import hashlib import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random def load_image(filename): image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename) def load_random_image(filenames): sample = random.choice(filenames) image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample) def load_augmented_random_image(filenames): sample = random.choice(filenames) image = load_img('/kaggle/working/' + sample) def load_image_for_augmentation(image_path): image = cv2.imread(image_path) if image.shape[-1] == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) image = np.array(image) return image def format_path_gcs(st): return GCS_DS_PATH + '/train_images/' + st def format_resized_image_path_gcs(st): return GCS_DS_PATH + '/img_sz_384/' + st def format_tpu_path(st): return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st # Defining mem usage reduction function to try to make dataframes lighter def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print('Memory usage after optimization is: {:.2f} MB'.format(end_mem)) print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem)) return df train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv') IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/' train_df = reduce_mem_usage(train_df) RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/' train_df.labels.value_counts() initial_length = len(train_df) hashes = {} duplicates = [] originals = [] for index, row in train_df.iterrows(): filename = row['image'] with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f: hash = hashlib.md5(f.read()).hexdigest() if hash in hashes: duplicates.append(filename) originals.append(hashes[hash]) train_df.drop(index, inplace=True) else: hashes[hash] = filename train_df.labels.value_counts()
code
130025335/cell_22
[ "image_output_1.png" ]
from tensorflow.keras.utils import load_img
import cv2
import hashlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import random
def load_image(filename):
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + filename)
def load_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('../input/plant-pathology-2021-fgvc8/train_images/' + sample)
def load_augmented_random_image(filenames):
    sample = random.choice(filenames)
    image = load_img('/kaggle/working/' + sample)
def load_image_for_augmentation(image_path):
    image = cv2.imread(image_path)
    if image.shape[-1] == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    image = np.array(image)
    return image
def format_path_gcs(st):
    return GCS_DS_PATH + '/train_images/' + st
def format_resized_image_path_gcs(st):
    return GCS_DS_PATH + '/img_sz_384/' + st
def format_tpu_path(st):
    return '/kaggle/input/resized-plant2021' + '/img_sz_384/' + st
# Defining mem usage reduction function to try to make dataframes lighter
def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
train_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
IMAGE_PATH = '../input/plant-pathology-2021-fgvc8/test-images/'
train_df = reduce_mem_usage(train_df)
RESIZED_IMAGE_PATH = '../input/resized-plant2021/img_sz_384/'
train_df.labels.value_counts()
label_counts = train_df['labels'].value_counts()
colormap = plt.cm.get_cmap('tab10')
num_labels = len(label_counts)
colors = [colormap(i) for i in range(num_labels)]
plt.xticks(rotation=45)
for i, count in enumerate(label_counts):
    plt.text(i, count + 50, str(count), ha='center')
total_count = label_counts.sum()
plt.text(0.5, 1.08, f'Total Count: {total_count}', transform=plt.gca().transAxes, ha='center', fontsize=12, fontweight='bold')
max_count = max(label_counts)
y_limit = max_count + max_count * 0.1
plt.ylim(top=y_limit)
initial_length = len(train_df)
hashes = {}
duplicates = []
originals = []
for index, row in train_df.iterrows():
    filename = row['image']
    with open(os.path.join(RESIZED_IMAGE_PATH, filename), 'rb') as f:
        hash = hashlib.md5(f.read()).hexdigest()
    if hash in hashes:
        duplicates.append(filename)
        originals.append(hashes[hash])
        train_df.drop(index, inplace=True)
    else:
        hashes[hash] = filename
image_filenames = [originals[0], duplicates[0], originals[1], duplicates[1], originals[2], duplicates[2]]
fig, axes = plt.subplots(3, 2, figsize=(10, 8))
for i, (image_filename, ax) in enumerate(zip(image_filenames, axes.flatten())):
    image = plt.imread(format_tpu_path(image_filename))
    ax.imshow(image)
    ax.axis('off')
    if i % 2 == 0:
        title = 'Original'
    else:
        title = 'Duplicate'
    ax.set_title(title)
plt.tight_layout()
plt.show()
code
130025335/cell_5
[ "image_output_1.png" ]
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
if len(gpus) == 1:
    strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
else:
    strategy = tf.distribute.MirroredStrategy()
tf.config.optimizer.set_experimental_options({'auto_mixed_precision': True})
print('Mixed precision enabled')
code
18159957/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
sns.catplot(x='target', hue='loan', kind='count', data=df)
plt.title('result by personal loan status')
plt.xlabel('results(y) of current campaign')
plt.show()
code
18159957/cell_9
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
print(df.describe())
code
18159957/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
print(df.dtypes)
code
18159957/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
print(df['age'].value_counts())
print(df['job'].value_counts())
print(df['marital'].value_counts())
print(df['education'].value_counts())
print(df['default'].value_counts())
print(df['housing'].value_counts())
print(df['loan'].value_counts())
print(df['contact'].value_counts())
print(df['month'].value_counts())
print(df['day_of_week'].value_counts())
print(df['duration'].value_counts())
print(df['campaign'].value_counts())
print(df['pdays'].value_counts())
print(df['previous'].value_counts())
print(df['poutcome'].value_counts())
print(df['emp.var.rate'].value_counts())
print(df['cons.price.idx'].value_counts())
print(df['cons.conf.idx'].value_counts())
print(df['euribor3m'].value_counts())
print(df['nr.employed'].value_counts())
print(df['target'].value_counts())
code
18159957/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ldf = df[df.default == 'yes']
#result(y) vs previous campaign outcome(poutcome) vs duration
g = sns.catplot(x="duration", y="target", row = "poutcome", kind="box", orient="h", height=2.5, aspect=5, data=df)
sns.catplot(x='target', hue='job', kind='count', data=df)
plt.title('Result by job type of clients')
plt.xlabel('results(y) of current campaign')
plt.show()
code
18159957/cell_7
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
print(df.head())
code
18159957/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ldf = df[df.default == 'yes']
#result(y) vs previous campaign outcome(poutcome) vs duration
g = sns.catplot(x="duration", y="target", row = "poutcome", kind="box", orient="h", height=2.5, aspect=5, data=df)
sns.catplot(x='target', kind='count', data=df)
plt.title('results (target count) of current campaign')
plt.xlabel('results(target) of current campaign')
plt.show()
code
18159957/cell_8
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
print(df.shape)
code
18159957/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
sns.catplot(x='target', hue='default', kind='count', data=df)
plt.title('result by default status')
plt.xlabel('results(y) of current campaign')
plt.show()
code
18159957/cell_16
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
ldf = df[df.default == 'yes']
print(ldf)
code
18159957/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ldf = df[df.default == 'yes']
g = sns.catplot(x='duration', y='target', row='poutcome', kind='box', orient='h', height=2.5, aspect=5, data=df)
code
18159957/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
sns.catplot(x='target', hue='housing', kind='count', data=df)
plt.title('result by housing loan status')
plt.show()
code
18159957/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/bank-additional-full.csv', sep=';', decimal='.', header=0, names=['age', 'job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'target'])
print(df.isnull().sum())
code