Dataset schema:
  path: string (13 to 17 chars)
  screenshot_names: sequence of strings (1 to 873 items)
  code: string (0 to 40.4k chars)
  cell_type: string (1 class: "code")
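A minimal sketch of iterating over these records, assuming they are stored as JSON Lines with exactly the four fields above; the file name "cells.jsonl" is hypothetical:

import json

# Walk the records: each one is a Kaggle notebook cell keyed by
# "<kernel_id>/cell_<n>" with its code and output screenshot names.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        notebook_id, cell_name = record['path'].split('/')  # e.g. "32068402/cell_58"
        print(notebook_id, cell_name, record['cell_type'], len(record['code']))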
32068402/cell_58
[ "text_plain_output_1.png" ]
from datetime import datetime
from gensim.models.phrases import Phraser
from pprint import pprint
from sklearn.preprocessing import normalize
from typing import List
import contractions
import ftfy
import gensim.models.keyedvectors as word2vec
import numpy as np
import operator
import os
import pandas as pd
import re
import string

CURRENCIES = {'$': 'USD', 'zł': 'PLN', '£': 'GBP', '¥': 'JPY', '฿': 'THB', '₡': 'CRC', '₦': 'NGN', '₩': 'KRW', '₪': 'ILS', '₫': 'VND', '€': 'EUR', '₱': 'PHP', '₲': 'PYG', '₴': 'UAH', '₹': 'INR'}
RE_NUMBER = re.compile('(?:^|(?<=[^\\w,.]))[+–-]?(([1-9]\\d{0,2}(,\\d{3})+(\\.\\d*)?)|([1-9]\\d{0,2}([ .]\\d{3})+(,\\d*)?)|(\\d*?[.,]\\d+)|\\d+)(?:$|(?=\\b))')
RE_URL = re.compile('((http://www\\.|https://www\\.|http://|https://)?' + '[a-z0-9]+([\\-.][a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?)')
STOP_WORDS = {'a', 'an', 'and', 'are', 'as', 'at', 'be', 'but', 'by', 'for', 'if', 'in', 'into', 'is', 'it', 'no', 'not', 'of', 'on', 'or', 'such', 'that', 'the', 'their', 'then', 'there', 'these', 'they', 'this', 'to', 'was', 'will', 'with'}

def clean_tokenized_sentence(tokens: List[str], unicode_normalization='NFC', unpack_contractions=False,
                             replace_currency_symbols=False, remove_punct=True, remove_numbers=False,
                             lowercase=True, remove_urls=True, remove_stop_words=True) -> str:
    if remove_stop_words:
        tokens = [token for token in tokens if token not in STOP_WORDS]
    sentence = ' '.join(tokens)
    if unicode_normalization:
        sentence = ftfy.fix_text(sentence, normalization=unicode_normalization)
    if unpack_contractions:
        sentence = contractions.fix(sentence, slang=False)
    if replace_currency_symbols:
        for currency_sign, currency_tok in CURRENCIES.items():
            sentence = sentence.replace(currency_sign, f'{currency_tok} ')
    if remove_urls:
        sentence = RE_URL.sub('_URL_', sentence)
    if remove_punct:
        sentence = sentence.translate(str.maketrans('', '', string.punctuation))
        sentence = re.sub(' +', ' ', sentence)
    if remove_numbers:
        sentence = RE_NUMBER.sub('_NUMBER_', sentence)
    if lowercase:
        sentence = sentence.lower()
    return sentence

sentences_df = pd.read_csv('../input/covid19sentencesmetadata/sentences_with_metadata.csv')
bigram_model = Phraser.load('../input/covid19phrasesmodels/covid_bigram_model_v0.pkl')
trigram_model = Phraser.load('../input/covid19phrasesmodels/covid_trigram_model_v0.pkl')

fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
    for line_num, line in enumerate(f):
        if first_line:
            dim = int(line.strip().split()[1])
            word_vecs = np.zeros((num_points, dim), dtype=float)
            first_line = False
            continue
        line = line.strip()
        word = line.split()[0]
        vec = word_vecs[line_num - 1]
        for index, vec_val in enumerate(line.split()[1:]):
            vec[index] = float(vec_val)
        index_to_word.append(word)
        if line_num >= num_points:
            break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)

fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))

def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)

def create_articles_metadata_mapping(sentences_df: pd.DataFrame) -> dict:
    sentence_id_to_metadata = {}
    for row_count, row in sentences_df.iterrows():
        sentence_id_to_metadata[row_count] = dict(paper_id=row['paper_id'], cord_uid=row['cord_uid'],
                                                  source=row['source'], url=row['url'],
                                                  publish_time=row['publish_time'], authors=row['authors'],
                                                  section=row['section'], sentence=row['sentence'])
    return sentence_id_to_metadata

sentence_id_to_metadata = create_articles_metadata_mapping(sentences_df)

class SearchEngine:
    def __init__(self, sentence_id_to_metadata: dict, sentences_df: pd.DataFrame, bigram_model, trigram_model, fasttext_model):
        self.sentence_id_to_metadata = sentence_id_to_metadata
        self.cleaned_sentences = sentences_df['cleaned_sentence'].tolist()
        self.bigram_model = bigram_model
        self.trigram_model = trigram_model
        self.fasttext_model = fasttext_model

    def _get_search_terms(self, keywords, synonyms_threshold):
        cleaned_terms = [clean_tokenized_sentence(keyword.split(' ')) for keyword in keywords]
        cleaned_terms = [term for term in cleaned_terms if term]
        terms_with_bigrams = self.bigram_model[' '.join(cleaned_terms).split(' ')]
        terms_with_trigrams = self.trigram_model[terms_with_bigrams]
        search_terms = [self.fasttext_model.most_similar(token) for token in terms_with_trigrams]
        search_terms = [synonym[0] for synonyms in search_terms for synonym in synonyms if synonym[1] >= synonyms_threshold]
        search_terms = list(terms_with_trigrams) + search_terms
        return search_terms

    def search(self, keywords: List[str], optional_keywords=None, top_n: int=10, synonyms_threshold=0.7,
               keyword_weight: float=3.0, optional_keyword_weight: float=0.5) -> List[dict]:
        if optional_keywords is None:
            optional_keywords = []
        search_terms = self._get_search_terms(keywords, synonyms_threshold)
        optional_search_terms = self._get_search_terms(optional_keywords, synonyms_threshold) if optional_keywords else []
        date_today = datetime.today()
        indexes = []
        match_counts = []
        days_diffs = []
        for sentence_index, sentence in enumerate(self.cleaned_sentences):
            sentence_tokens = sentence.split(' ')
            sentence_tokens_set = set(sentence_tokens)
            match_count = sum([keyword_weight if keyword in sentence_tokens_set else 0 for keyword in search_terms])
            if match_count > 0:
                indexes.append(sentence_index)
                if optional_search_terms:
                    match_count += sum([optional_keyword_weight if keyword in sentence_tokens_set else 0 for keyword in optional_search_terms])
                match_counts.append(match_count)
                article_date = self.sentence_id_to_metadata[sentence_index]['publish_time']
                if article_date == '2020':
                    article_date = '2020-01-01'
                article_date = datetime.strptime(article_date, '%Y-%m-%d')
                days_diff = (date_today - article_date).days
                days_diffs.append(days_diff)
        match_counts = [float(match_count) / sum(match_counts) for match_count in match_counts]
        days_diffs = [max(days_diffs) - days_diff for days_diff in days_diffs]
        days_diffs = [float(days_diff) / sum(days_diffs) for days_diff in days_diffs]
        index_to_score = {}
        for index, match_count, days_diff in zip(indexes, match_counts, days_diffs):
            index_to_score[index] = 0.7 * match_count + 0.3 * days_diff
        sorted_indexes = sorted(index_to_score.items(), key=operator.itemgetter(1), reverse=True)
        sorted_indexes = [item[0] for item in sorted_indexes]
        sorted_indexes = sorted_indexes[0:min(top_n, len(sorted_indexes))]
        results = []
        for index in sorted_indexes:
            results.append(self.sentence_id_to_metadata[index])
        return results

search_engine = SearchEngine(sentence_id_to_metadata, sentences_df, bigram_model, trigram_model, fasttext_model)

def search(keywords, optional_keywords=None, top_n=10, synonyms_threshold=0.8, only_sentences=False):
    results = search_engine.search(keywords, optional_keywords=optional_keywords, top_n=top_n, synonyms_threshold=synonyms_threshold)

search(keywords=['spillover', 'bats', 'snakes', 'exotic animals', 'seafood'], optional_keywords=['new coronavirus', 'coronavirus', 'covid19'], top_n=3)
code
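The SearchEngine above ranks each matching sentence by a fixed 0.7/0.3 blend of normalized keyword weight and normalized recency. A standalone sketch of just that scoring step, with made-up match counts and article ages:

# Sketch of the ranking used in cell_58: normalized match weight blended
# 0.7/0.3 with normalized recency. All numbers are made up.
match_counts = [3.0, 6.0, 3.5]   # weighted keyword hits per matching sentence
days_old = [10, 400, 30]         # days since each article was published

matches = [m / sum(match_counts) for m in match_counts]
recency = [max(days_old) - d for d in days_old]
recency = [r / sum(recency) for r in recency]

scores = [0.7 * m + 0.3 * r for m, r in zip(matches, recency)]
print(sorted(range(len(scores)), key=scores.__getitem__, reverse=True))  # best sentence first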
32068402/cell_28
[ "text_plain_output_1.png" ]
from gensim.models.phrases import Phraser

bigram_model = Phraser.load('../input/covid19phrasesmodels/covid_bigram_model_v0.pkl')
bigram_model['despite social media often vehicle fake news boast news hype also worth noting tremendous effort scientific community provide free uptodate information ongoing studies well critical evaluations'.split()]
code
32068402/cell_8
[ "text_html_output_10.png", "text_html_output_22.png", "text_html_output_16.png", "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_15.png", "text_html_output_5.png", "text_html_output_14.png", "text_html_output_23.png", "text_html_output_19.png", "text_html_output_9.png", "text_html_output_13.png", "text_html_output_20.png", "text_html_output_21.png", "text_html_output_1.png", "text_html_output_17.png", "text_html_output_18.png", "text_html_output_12.png", "text_html_output_11.png", "text_html_output_24.png", "text_html_output_8.png", "text_html_output_25.png", "text_html_output_3.png", "text_html_output_7.png" ]
['2019-ncov', '2019 novel coronavirus', 'coronavirus 2019', 'coronavirus disease 19', 'covid-19', 'covid 19', 'ncov-2019', 'sars-cov-2', 'wuhan coronavirus', 'wuhan pneumonia', 'wuhan virus']
code
32068402/cell_80
[ "text_html_output_1.png" ]
from IPython.display import display, HTML
from datetime import datetime
from gensim.models.phrases import Phraser
from pprint import pprint
from sklearn.preprocessing import normalize
from transformers import BartTokenizer, BartForConditionalGeneration
from typing import List
import contractions
import ftfy
import gensim.models.keyedvectors as word2vec
import json
import numpy as np
import operator
import os
import pandas as pd
import re
import string
import torch

CURRENCIES = {'$': 'USD', 'zł': 'PLN', '£': 'GBP', '¥': 'JPY', '฿': 'THB', '₡': 'CRC', '₦': 'NGN', '₩': 'KRW', '₪': 'ILS', '₫': 'VND', '€': 'EUR', '₱': 'PHP', '₲': 'PYG', '₴': 'UAH', '₹': 'INR'}
RE_NUMBER = re.compile('(?:^|(?<=[^\\w,.]))[+–-]?(([1-9]\\d{0,2}(,\\d{3})+(\\.\\d*)?)|([1-9]\\d{0,2}([ .]\\d{3})+(,\\d*)?)|(\\d*?[.,]\\d+)|\\d+)(?:$|(?=\\b))')
RE_URL = re.compile('((http://www\\.|https://www\\.|http://|https://)?' + '[a-z0-9]+([\\-.][a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?)')
STOP_WORDS = {'a', 'an', 'and', 'are', 'as', 'at', 'be', 'but', 'by', 'for', 'if', 'in', 'into', 'is', 'it', 'no', 'not', 'of', 'on', 'or', 'such', 'that', 'the', 'their', 'then', 'there', 'these', 'they', 'this', 'to', 'was', 'will', 'with'}

def clean_tokenized_sentence(tokens: List[str], unicode_normalization='NFC', unpack_contractions=False,
                             replace_currency_symbols=False, remove_punct=True, remove_numbers=False,
                             lowercase=True, remove_urls=True, remove_stop_words=True) -> str:
    if remove_stop_words:
        tokens = [token for token in tokens if token not in STOP_WORDS]
    sentence = ' '.join(tokens)
    if unicode_normalization:
        sentence = ftfy.fix_text(sentence, normalization=unicode_normalization)
    if unpack_contractions:
        sentence = contractions.fix(sentence, slang=False)
    if replace_currency_symbols:
        for currency_sign, currency_tok in CURRENCIES.items():
            sentence = sentence.replace(currency_sign, f'{currency_tok} ')
    if remove_urls:
        sentence = RE_URL.sub('_URL_', sentence)
    if remove_punct:
        sentence = sentence.translate(str.maketrans('', '', string.punctuation))
        sentence = re.sub(' +', ' ', sentence)
    if remove_numbers:
        sentence = RE_NUMBER.sub('_NUMBER_', sentence)
    if lowercase:
        sentence = sentence.lower()
    return sentence

sentences_df = pd.read_csv('../input/covid19sentencesmetadata/sentences_with_metadata.csv')
bigram_model = Phraser.load('../input/covid19phrasesmodels/covid_bigram_model_v0.pkl')
trigram_model = Phraser.load('../input/covid19phrasesmodels/covid_trigram_model_v0.pkl')

fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
    for line_num, line in enumerate(f):
        if first_line:
            dim = int(line.strip().split()[1])
            word_vecs = np.zeros((num_points, dim), dtype=float)
            first_line = False
            continue
        line = line.strip()
        word = line.split()[0]
        vec = word_vecs[line_num - 1]
        for index, vec_val in enumerate(line.split()[1:]):
            vec[index] = float(vec_val)
        index_to_word.append(word)
        if line_num >= num_points:
            break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)

fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))

def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)

def create_articles_metadata_mapping(sentences_df: pd.DataFrame) -> dict:
    sentence_id_to_metadata = {}
    for row_count, row in sentences_df.iterrows():
        sentence_id_to_metadata[row_count] = dict(paper_id=row['paper_id'], cord_uid=row['cord_uid'],
                                                  source=row['source'], url=row['url'],
                                                  publish_time=row['publish_time'], authors=row['authors'],
                                                  section=row['section'], sentence=row['sentence'])
    return sentence_id_to_metadata

sentence_id_to_metadata = create_articles_metadata_mapping(sentences_df)

class SearchEngine:
    def __init__(self, sentence_id_to_metadata: dict, sentences_df: pd.DataFrame, bigram_model, trigram_model, fasttext_model):
        self.sentence_id_to_metadata = sentence_id_to_metadata
        self.cleaned_sentences = sentences_df['cleaned_sentence'].tolist()
        self.bigram_model = bigram_model
        self.trigram_model = trigram_model
        self.fasttext_model = fasttext_model

    def _get_search_terms(self, keywords, synonyms_threshold):
        cleaned_terms = [clean_tokenized_sentence(keyword.split(' ')) for keyword in keywords]
        cleaned_terms = [term for term in cleaned_terms if term]
        terms_with_bigrams = self.bigram_model[' '.join(cleaned_terms).split(' ')]
        terms_with_trigrams = self.trigram_model[terms_with_bigrams]
        search_terms = [self.fasttext_model.most_similar(token) for token in terms_with_trigrams]
        search_terms = [synonym[0] for synonyms in search_terms for synonym in synonyms if synonym[1] >= synonyms_threshold]
        search_terms = list(terms_with_trigrams) + search_terms
        return search_terms

    def search(self, keywords: List[str], optional_keywords=None, top_n: int=10, synonyms_threshold=0.7,
               keyword_weight: float=3.0, optional_keyword_weight: float=0.5) -> List[dict]:
        if optional_keywords is None:
            optional_keywords = []
        search_terms = self._get_search_terms(keywords, synonyms_threshold)
        optional_search_terms = self._get_search_terms(optional_keywords, synonyms_threshold) if optional_keywords else []
        date_today = datetime.today()
        indexes = []
        match_counts = []
        days_diffs = []
        for sentence_index, sentence in enumerate(self.cleaned_sentences):
            sentence_tokens = sentence.split(' ')
            sentence_tokens_set = set(sentence_tokens)
            match_count = sum([keyword_weight if keyword in sentence_tokens_set else 0 for keyword in search_terms])
            if match_count > 0:
                indexes.append(sentence_index)
                if optional_search_terms:
                    match_count += sum([optional_keyword_weight if keyword in sentence_tokens_set else 0 for keyword in optional_search_terms])
                match_counts.append(match_count)
                article_date = self.sentence_id_to_metadata[sentence_index]['publish_time']
                if article_date == '2020':
                    article_date = '2020-01-01'
                article_date = datetime.strptime(article_date, '%Y-%m-%d')
                days_diff = (date_today - article_date).days
                days_diffs.append(days_diff)
        match_counts = [float(match_count) / sum(match_counts) for match_count in match_counts]
        days_diffs = [max(days_diffs) - days_diff for days_diff in days_diffs]
        days_diffs = [float(days_diff) / sum(days_diffs) for days_diff in days_diffs]
        index_to_score = {}
        for index, match_count, days_diff in zip(indexes, match_counts, days_diffs):
            index_to_score[index] = 0.7 * match_count + 0.3 * days_diff
        sorted_indexes = sorted(index_to_score.items(), key=operator.itemgetter(1), reverse=True)
        sorted_indexes = [item[0] for item in sorted_indexes]
        sorted_indexes = sorted_indexes[0:min(top_n, len(sorted_indexes))]
        results = []
        for index in sorted_indexes:
            results.append(self.sentence_id_to_metadata[index])
        return results

task_id = 2
with open(f'../input/covid19seedsentences/{task_id}.json') as in_fp:
    seed_sentences_json = json.load(in_fp)

class BartSummarizer:
    def __init__(self):
        self.torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model_name = 'bart-large-cnn'
        self.tokenizer_summarize = BartTokenizer.from_pretrained(model_name)
        self.model_summarize = BartForConditionalGeneration.from_pretrained(model_name)
        self.model_summarize.to(self.torch_device)
        self.model_summarize.eval()

    def create_summary(self, text: str, repetition_penalty=1.0) -> str:
        text_input_ids = self.tokenizer_summarize.batch_encode_plus([text], return_tensors='pt', max_length=1024)['input_ids'].to(self.torch_device)
        summary_ids = self.model_summarize.generate(text_input_ids, num_beams=4, max_length=1024, min_length=256,
                                                    no_repeat_ngram_size=4, repetition_penalty=repetition_penalty)
        summary = self.tokenizer_summarize.decode(summary_ids.squeeze(), skip_special_tokens=True)
        return summary

bart_summarizer = BartSummarizer()

with open(f'../input/covid19seedsentences/{task_id}_relevant_sentences.json') as in_fp:
    relevant_sentences_json = json.load(in_fp)

answers_results = []
for idx, sub_task_json in enumerate(relevant_sentences_json['subTasks']):
    sub_task_description = sub_task_json['sub_task_description']
    best_sentences = seed_sentences_json['subTasks'][idx]['bestSentences']
    relevant_sentences = sub_task_json['relevant_sentences']
    relevant_sentences_texts = [result['sentence'] for result in relevant_sentences]
    sub_task_summary = bart_summarizer.create_summary(' '.join(best_sentences + relevant_sentences_texts))
    answers_results.append(dict(sub_task_description=sub_task_description, relevant_sentences=relevant_sentences, sub_task_summary=sub_task_summary))

pd.set_option('display.max_colwidth', 0)

def display_summary(summary: str):
    return

def display_sub_task_description(sub_task_description):
    return

def display_task_name(task_name):
    return

def visualize_output(sub_task_json):
    """ Prints output for each sub-task """
    results = sub_task_json.get('relevant_sentences')
    sentence_output = pd.DataFrame(sub_task_json.get('relevant_sentences'))
    sentence_output.rename(columns={'sentence': 'Relevant Sentence', 'cord_id': 'CORD UID', 'publish_time': 'Publish Time', 'url': 'URL', 'source': 'Source'}, inplace=True)

def save_output(seed_sentences, sub_task_json):
    """ Saves output for each sub-task """
    sentence_output = pd.DataFrame(sub_task_json.get('relevant_sentences'))
    sentence_output.rename(columns={'sentence': 'Relevant Sentence', 'cord_id': 'CORD UID', 'publish_time': 'Publish Time', 'url': 'URL', 'source': 'Source'}, inplace=True)
    return sentence_output[['cord_uid', 'Source', 'Publish Time', 'Relevant Sentence', 'URL']]

relevant_sentences = []
for idx, sub_task_json in enumerate(answers_results):
    task_sentences = save_output(seed_sentences_json['subTasks'][idx]['bestSentences'], sub_task_json)
    relevant_sentences.append(task_sentences)

all_relevant_sentences = pd.concat(relevant_sentences).reset_index()
all_relevant_sentences.head(1)
code
32068402/cell_47
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from pprint import pprint
from sklearn.preprocessing import normalize
import gensim.models.keyedvectors as word2vec
import numpy as np
import os

fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
    for line_num, line in enumerate(f):
        if first_line:
            dim = int(line.strip().split()[1])
            word_vecs = np.zeros((num_points, dim), dtype=float)
            first_line = False
            continue
        line = line.strip()
        word = line.split()[0]
        vec = word_vecs[line_num - 1]
        for index, vec_val in enumerate(line.split()[1:]):
            vec[index] = float(vec_val)
        index_to_word.append(word)
        if line_num >= num_points:
            break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)

fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))

def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)

print_most_similar('fake_news')
code
32068402/cell_46
[ "image_output_1.png" ]
from pprint import pprint
from sklearn.preprocessing import normalize
import gensim.models.keyedvectors as word2vec
import numpy as np
import os

fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
    for line_num, line in enumerate(f):
        if first_line:
            dim = int(line.strip().split()[1])
            word_vecs = np.zeros((num_points, dim), dtype=float)
            first_line = False
            continue
        line = line.strip()
        word = line.split()[0]
        vec = word_vecs[line_num - 1]
        for index, vec_val in enumerate(line.split()[1:]):
            vec[index] = float(vec_val)
        index_to_word.append(word)
        if line_num >= num_points:
            break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)

fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))

def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)

print_most_similar('new_coronavirus')
code
32068402/cell_24
[ "text_html_output_1.png" ]
import pandas as pd

sentences_df = pd.read_csv('../input/covid19sentencesmetadata/sentences_with_metadata.csv')
print(f'Sentence count: {len(sentences_df)}')
code
32068402/cell_14
[ "text_plain_output_1.png" ]
!pip install contractions
code
34128236/cell_42
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df['date'] = pd.to_datetime(df['date'])  # added: df['date'].dt.year below requires datetime dtype
df = df.dropna()

year_labels = []
for z in range(2010, 2021):
    year_labels.append(z)

fight_counts = []
for z in year_labels:
    fight_counts.append(len(df[df['date'].dt.year == z]))

female_fight_counts = []
for z in year_labels:
    female_fight_counts.append(len(df[(df['date'].dt.year == z) & (df['gender'] == 'FEMALE')]))

# 'underdog' is created in cell_38 of the source notebook; rebuilt here with .loc
# so this standalone snippet runs without a KeyError
df.loc[df['R_odds'] > df['B_odds'], 'underdog'] = 'Red'
df.loc[df['B_odds'] > df['R_odds'], 'underdog'] = 'Blue'
df.loc[df['B_odds'] == df['R_odds'], 'underdog'] = 'Even'

df_no_even = df[df['underdog'] != 'Even']
df_no_even = df_no_even[df_no_even['Winner'] != 'Draw']
number_of_fights = len(df_no_even)
number_of_upsets = len(df_no_even[df_no_even['Winner'] == df_no_even['underdog']])
number_of_favorites = len(df_no_even[df_no_even['Winner'] != df_no_even['underdog']])
upset_percent = number_of_upsets / number_of_fights * 100
favorite_percent = number_of_favorites / number_of_fights * 100

labels = ('Favorites', 'Underdogs')
sizes = [favorite_percent, upset_percent]
fig1, ax1 = plt.subplots(figsize=(9, 9))
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', textprops={'fontsize': 14})
code
34128236/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
df['country'] = df['country'].str.strip()
display(df[['country']].describe())
display(df['country'].unique())
code
34128236/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
df[['R_fighter', 'B_fighter']].describe()
code
34128236/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
print(df['title_bout'].describe())
code
34128236/cell_23
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
print(df['Winner'].describe())
print()
print(df['Winner'].unique())
code
34128236/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df['date'] = pd.to_datetime(df['date'])  # added: df['date'].dt.year below requires datetime dtype
df = df.dropna()

year_labels = []
for z in range(2010, 2021):
    year_labels.append(z)

fight_counts = []
for z in year_labels:
    fight_counts.append(len(df[df['date'].dt.year == z]))

plt.figure(figsize=(9, 5))
plt.plot(year_labels, fight_counts)
plt.xlabel('Year', fontsize=16)
plt.ylabel('# of Fights', fontsize=16)
plt.title('Fights Per Year', fontweight='bold', fontsize=16)
plt.show()
code
34128236/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
print(df['gender'].describe())
code
34128236/cell_41
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()

# 'underdog' is created in cell_38 of the source notebook; rebuilt here with .loc
# so this standalone snippet runs without a KeyError
df.loc[df['R_odds'] > df['B_odds'], 'underdog'] = 'Red'
df.loc[df['B_odds'] > df['R_odds'], 'underdog'] = 'Blue'
df.loc[df['B_odds'] == df['R_odds'], 'underdog'] = 'Even'

df_no_even = df[df['underdog'] != 'Even']
df_no_even = df_no_even[df_no_even['Winner'] != 'Draw']
print(f'Number of fights including even fights and draws: {len(df)}')
print(f'Number of fights with even fights and draws removed: {len(df_no_even)}')
code
34128236/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
df[['location']].describe()
code
34128236/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df['date'] = pd.to_datetime(df['date'])  # added: df['date'].dt.year below requires datetime dtype
df = df.dropna()

year_labels = []
for z in range(2010, 2021):
    year_labels.append(z)

fight_counts = []
for z in year_labels:
    fight_counts.append(len(df[df['date'].dt.year == z]))

female_fight_counts = []
for z in year_labels:
    female_fight_counts.append(len(df[(df['date'].dt.year == z) & (df['gender'] == 'FEMALE')]))

# 'underdog' is created in cell_38 of the source notebook; rebuilt here with .loc
# so this standalone snippet runs without a KeyError
df.loc[df['R_odds'] > df['B_odds'], 'underdog'] = 'Red'
df.loc[df['B_odds'] > df['R_odds'], 'underdog'] = 'Blue'
df.loc[df['B_odds'] == df['R_odds'], 'underdog'] = 'Even'

df_no_even = df[df['underdog'] != 'Even']
df_no_even = df_no_even[df_no_even['Winner'] != 'Draw']
number_of_fights = len(df_no_even)
number_of_upsets = len(df_no_even[df_no_even['Winner'] == df_no_even['underdog']])
number_of_favorites = len(df_no_even[df_no_even['Winner'] != df_no_even['underdog']])
#print(number_of_upsets)
#print(number_of_fights)
#print(number_of_favorites)

upset_percent = (number_of_upsets / number_of_fights) * 100
favorite_percent = (number_of_favorites / number_of_fights) * 100
#print(upset_percent)
#print(favorite_percent)

labels = 'Favorites', 'Underdogs'
sizes = [favorite_percent, upset_percent]
fig1, ax1 = plt.subplots(figsize=(9, 9))
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', textprops={'fontsize': 14})

year_labels
year_fight_counts = []
year_upset_counts = []
year_upset_percent = []
for y in year_labels:
    temp_fights = df_no_even[df_no_even['date'].dt.year == y]
    temp_upsets = temp_fights[temp_fights['Winner'] == temp_fights['underdog']]
    year_fight_counts.append(len(temp_fights))
    year_upset_counts.append(len(temp_upsets))
    year_upset_percent.append(len(temp_upsets) / len(temp_fights))

year_upset_percent = [x * 100 for x in year_upset_percent]
plt.figure(figsize=(9, 5))
barlist = plt.bar(year_labels, year_upset_percent)
plt.xlabel('Year', fontsize=16)
plt.ylabel('Percent of Upset Winners', fontsize=16)
plt.xticks(year_labels, rotation=90)
plt.title('Upset Percentage By Year', fontweight='bold', fontsize=16)
barlist[10].set_color('black')
barlist[3].set_color('grey')
code
34128236/cell_49
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df['date'] = pd.to_datetime(df['date'])
df = df.dropna()

year_labels = []
for z in range(2010, 2021):
    year_labels.append(z)

fight_counts = []
for z in year_labels:
    fight_counts.append(len(df[df['date'].dt.year == z]))

female_fight_counts = []
for z in year_labels:
    female_fight_counts.append(len(df[(df['date'].dt.year == z) & (df['gender'] == 'FEMALE')]))

# 'underdog' is created in cell_38 of the source notebook; rebuilt here with .loc
# so this standalone snippet runs without a KeyError
df.loc[df['R_odds'] > df['B_odds'], 'underdog'] = 'Red'
df.loc[df['B_odds'] > df['R_odds'], 'underdog'] = 'Blue'
df.loc[df['B_odds'] == df['R_odds'], 'underdog'] = 'Even'

df_no_even = df[df['underdog'] != 'Even']
df_no_even = df_no_even[df_no_even['Winner'] != 'Draw']
number_of_fights = len(df_no_even)
number_of_upsets = len(df_no_even[df_no_even['Winner'] == df_no_even['underdog']])
number_of_favorites = len(df_no_even[df_no_even['Winner'] != df_no_even['underdog']])
#print(number_of_upsets)
#print(number_of_fights)
#print(number_of_favorites)

upset_percent = (number_of_upsets / number_of_fights) * 100
favorite_percent = (number_of_favorites / number_of_fights) * 100
#print(upset_percent)
#print(favorite_percent)

labels = 'Favorites', 'Underdogs'
sizes = [favorite_percent, upset_percent]
fig1, ax1 = plt.subplots(figsize=(9, 9))
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', textprops={'fontsize': 14})

year_labels
year_fight_counts = []
year_upset_counts = []
year_upset_percent = []
for y in year_labels:
    temp_fights = df_no_even[df_no_even['date'].dt.year == y]
    temp_upsets = temp_fights[temp_fights['Winner'] == temp_fights['underdog']]
    year_fight_counts.append(len(temp_fights))
    year_upset_counts.append(len(temp_upsets))
    year_upset_percent.append(len(temp_upsets) / len(temp_fights))

year_upset_percent = [x * 100 for x in year_upset_percent]
plt.figure(figsize=(9, 5))
barlist = plt.bar(year_labels, year_upset_percent)
plt.xlabel('Year', fontsize=16)
plt.ylabel('Percent of Upset Winners', fontsize=16)
plt.xticks(year_labels, rotation=90)
plt.title('Upset Percentage By Year', fontweight='bold', fontsize=16)
barlist[10].set_color('black')
barlist[3].set_color('grey')

temp_df = pd.DataFrame({"Percent of Underdog Winners": year_upset_percent}, index=year_labels)
fig, ax = plt.subplots(figsize=(4, 8))
sns.heatmap(temp_df, annot=True, fmt=".4g", cmap='binary', ax=ax)
plt.yticks(rotation=0)
plt.title("Upset Percentage by Year", fontsize=16, fontweight='bold')

weight_class_list = ['Flyweight', 'Bantamweight', 'Featherweight', 'Lightweight', 'Welterweight', 'Middleweight',
                     'Light Heavyweight', 'Heavyweight', "Women's Strawweight", "Women's Flyweight",
                     "Women's Bantamweight", "Women's Featherweight", 'Catch Weight']
wc_fight_counts = []
wc_upset_counts = []
wc_upset_percent = []
for wc in weight_class_list:
    temp_fights = df_no_even[df_no_even['weight_class'] == wc]
    temp_upsets = temp_fights[temp_fights['Winner'] == temp_fights['underdog']]
    wc_fight_counts.append(len(temp_fights))
    wc_upset_counts.append(len(temp_upsets))
    wc_upset_percent.append(len(temp_upsets) / len(temp_fights))

wc_upset_percent = [x * 100 for x in wc_upset_percent]
plt.figure(figsize=(9, 5))
barlist = plt.bar(weight_class_list, wc_upset_percent)
plt.xlabel('Weight Class', fontsize=16)
plt.ylabel('Percent of Upset Winners', fontsize=16)
plt.xticks(weight_class_list, rotation=90)
plt.title('Upset Percentage By Weight Class', fontweight='bold', fontsize=16)
barlist[9].set_color('black')
barlist[11].set_color('grey')
code
34128236/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
df[['date']].describe()
code
34128236/cell_38
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()

df['underdog'] = ''
red_underdog_mask = df['R_odds'] > df['B_odds']
blue_underdog_mask = df['B_odds'] > df['R_odds']
even_mask = df['B_odds'] == df['R_odds']
df['underdog'][red_underdog_mask] = 'Red'
df['underdog'][blue_underdog_mask] = 'Blue'
df['underdog'][even_mask] = 'Even'
code
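The cell above labels underdogs through chained indexing (df['underdog'][mask] = ...), which is the pattern pandas flags with SettingWithCopyWarning. A sketch of the same labeling done with np.select instead, using the column names and conditions from the cell above:

import numpy as np
import pandas as pd

def label_underdog(df: pd.DataFrame) -> pd.DataFrame:
    # Same result as the chained assignments above, but written without
    # chained indexing so pandas does not raise SettingWithCopyWarning.
    conditions = [df['R_odds'] > df['B_odds'],
                  df['B_odds'] > df['R_odds']]
    df['underdog'] = np.select(conditions, ['Red', 'Blue'], default='Even')
    return df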
34128236/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
df[['R_odds', 'B_odds']].describe()
code
34128236/cell_35
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df['date'] = pd.to_datetime(df['date'])  # added: df['date'].dt.year below requires datetime dtype
df = df.dropna()

year_labels = []
for z in range(2010, 2021):
    year_labels.append(z)

fight_counts = []
for z in year_labels:
    fight_counts.append(len(df[df['date'].dt.year == z]))

female_fight_counts = []
for z in year_labels:
    female_fight_counts.append(len(df[(df['date'].dt.year == z) & (df['gender'] == 'FEMALE')]))

plt.figure(figsize=(9, 5))
plt.plot(year_labels, female_fight_counts)
plt.xlabel('Year', fontsize=16)
plt.ylabel('# of Fights', fontsize=16)
plt.title('Female Fights Per Year', fontweight='bold', fontsize=16)
plt.show()
code
34128236/cell_46
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df['date'] = pd.to_datetime(df['date'])
df = df.dropna()

year_labels = []
for z in range(2010, 2021):
    year_labels.append(z)

fight_counts = []
for z in year_labels:
    fight_counts.append(len(df[df['date'].dt.year == z]))

female_fight_counts = []
for z in year_labels:
    female_fight_counts.append(len(df[(df['date'].dt.year == z) & (df['gender'] == 'FEMALE')]))

# 'underdog' is created in cell_38 of the source notebook; rebuilt here with .loc
# so this standalone snippet runs without a KeyError
df.loc[df['R_odds'] > df['B_odds'], 'underdog'] = 'Red'
df.loc[df['B_odds'] > df['R_odds'], 'underdog'] = 'Blue'
df.loc[df['B_odds'] == df['R_odds'], 'underdog'] = 'Even'

df_no_even = df[df['underdog'] != 'Even']
df_no_even = df_no_even[df_no_even['Winner'] != 'Draw']
number_of_fights = len(df_no_even)
number_of_upsets = len(df_no_even[df_no_even['Winner'] == df_no_even['underdog']])
number_of_favorites = len(df_no_even[df_no_even['Winner'] != df_no_even['underdog']])
#print(number_of_upsets)
#print(number_of_fights)
#print(number_of_favorites)

upset_percent = (number_of_upsets / number_of_fights) * 100
favorite_percent = (number_of_favorites / number_of_fights) * 100
#print(upset_percent)
#print(favorite_percent)

labels = 'Favorites', 'Underdogs'
sizes = [favorite_percent, upset_percent]
fig1, ax1 = plt.subplots(figsize=(9, 9))
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', textprops={'fontsize': 14})

year_labels
year_fight_counts = []
year_upset_counts = []
year_upset_percent = []
for y in year_labels:
    temp_fights = df_no_even[df_no_even['date'].dt.year == y]
    temp_upsets = temp_fights[temp_fights['Winner'] == temp_fights['underdog']]
    year_fight_counts.append(len(temp_fights))
    year_upset_counts.append(len(temp_upsets))
    year_upset_percent.append(len(temp_upsets) / len(temp_fights))
#print(year_fight_counts)
#print()
#print(year_upset_counts)
#print()
#print(year_upset_percent)

year_upset_percent = [x * 100 for x in year_upset_percent]
plt.figure(figsize=(9, 5))
barlist = plt.bar(year_labels, year_upset_percent)
plt.xlabel("Year", fontsize=16)
plt.ylabel("Percent of Upset Winners", fontsize=16)
plt.xticks(year_labels, rotation=90)
plt.title('Upset Percentage By Year', fontweight='bold', fontsize=16)
barlist[10].set_color('black')
barlist[3].set_color('grey')

temp_df = pd.DataFrame({'Percent of Underdog Winners': year_upset_percent}, index=year_labels)
fig, ax = plt.subplots(figsize=(4, 8))
sns.heatmap(temp_df, annot=True, fmt='.4g', cmap='binary', ax=ax)
plt.yticks(rotation=0)
plt.title('Upset Percentage by Year', fontsize=16, fontweight='bold')
code
34128236/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
df.info(verbose=True)
code
34128236/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df = df.dropna()
print(df['weight_class'].describe())
print()
print(df['weight_class'].unique())
code
34128236/cell_5
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ufc-fights-2010-2020-with-betting-odds/data.csv')
df.info(verbose=True)
code
130010382/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
sample = pd.read_csv('/kaggle/input/playground-series-s3e15/sample_submission.csv')
original = pd.read_csv('/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv')

train = pd.concat([data, original])
test = train[train['x_e_out [-]'].isnull()]
train = train[train['x_e_out [-]'].notnull()]
print('Columns:', train.info())
code
130010382/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
sample = pd.read_csv('/kaggle/input/playground-series-s3e15/sample_submission.csv')
original = pd.read_csv('/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv')
data.head()
code
130010382/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
sample = pd.read_csv('/kaggle/input/playground-series-s3e15/sample_submission.csv')
original = pd.read_csv('/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv')

train = pd.concat([data, original])
test = train[train['x_e_out [-]'].isnull()]
train = train[train['x_e_out [-]'].notnull()]
test.head()
code
130010382/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import pandas_profiling import matplotlib.pyplot as plt import seaborn as sns import matplotlib as mpl mpl.rcParams.update(mpl.rcParamsDefault) import warnings warnings.filterwarnings('ignore') from IPython.display import Image from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from sklearn.isotonic import IsotonicRegression import random import os from copy import deepcopy from functools import partial from itertools import combinations from sklearn.base import BaseEstimator, TransformerMixin from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.model_selection import StratifiedKFold, KFold, RepeatedKFold from sklearn.metrics import roc_auc_score, accuracy_score, mean_squared_error, mean_absolute_error from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler from sklearn.decomposition import PCA import seaborn as sns from sklearn import preprocessing from category_encoders import OneHotEncoder, OrdinalEncoder, CountEncoder from sklearn.model_selection import GridSearchCV from sklearn.ensemble import VotingRegressor import optuna import xgboost as xgb import lightgbm as lgb from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from sklearn.ensemble import GradientBoostingRegressor from catboost import CatBoost, CatBoostRegressor, CatBoostClassifier from catboost import Pool from h2o.automl import H2OAutoML import warnings warnings.filterwarnings('ignore', category=UserWarning) import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
130010382/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
sample = pd.read_csv('/kaggle/input/playground-series-s3e15/sample_submission.csv')
original = pd.read_csv('/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv')

train = pd.concat([data, original])
test = train[train['x_e_out [-]'].isnull()]
train = train[train['x_e_out [-]'].notnull()]
train.head()
code
130010382/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/playground-series-s3e15/data.csv')
sample = pd.read_csv('/kaggle/input/playground-series-s3e15/sample_submission.csv')
original = pd.read_csv('/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv')
sample.head()
code
49127503/cell_4
[ "text_plain_output_1.png" ]
a, b, c = (10, 21, 0)
for i in range(10):
    print(a)
    c = a + b
    a = b
    b = c
code
49127503/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
bil1 = int(input('Enter a number: '))
hasil = bil1 * bil1 * bil1

print('gold price to rupiah conversion program')
bil1 = int(input('enter the gold weight: '))
print('%d' % bil1)
hasil = bil1 * 10159000
print('the price of %d grams of gold is: Rp %d' % (bil1, hasil))
code
49127503/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
bil1 = int(input('Enter a number: '))
hasil = bil1 * bil1 * bil1
print('The cube of %d is: %d' % (bil1, hasil))
code
49127503/cell_7
[ "text_plain_output_1.png" ]
a, b, c = (10, 21, 0)
for i in range(10):
    c = a + b
    a = b
    b = c

for a in range(50, 64, 4):
    print(a, end=',')
for a in range(64, 74, 4):
    print(a, end=',')
for a in range(74, 84, 4):
    print(a, end=',')
for a in range(84, 94, 4):
    print(a, end=',')
for a in range(94, 103, 4):
    print(a, end=',')
code
49127503/cell_5
[ "text_plain_output_1.png" ]
for i in range(10, 0, -1):
    print(' ' * (i - 1) + '*' * (11 - i) + '*' * (10 - i))
for i in range(10, 0, -1):
    print(' ' * (10 - i) + '*' * i + '*' * (i - 1))
code
50231823/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
code
50231823/cell_9
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
code
50231823/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.head()
code
50231823/cell_6
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
code
50231823/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
df['target'].value_counts()
code
50231823/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.cp, df.target)
pd.crosstab(df.cp, df.target).plot(kind='bar', rot=0, xlabel='Chest Pain', ylabel='Frequency',
                                   title='Frequency Graph between the Chest Pain and Target')
code
50231823/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.shape
pd.crosstab(df.sex, df.target)
pd.crosstab(df.sex, df.target).plot(kind='bar', rot=0, ylabel='Frequency', xlabel='Sex',
                                    title='Frequency graph between the Sex and Target')
code
50231823/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/heart-disease-uci/heart.csv')
df.describe()
code
16150211/cell_42
[ "text_html_output_1.png" ]
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Nadam
import datetime as dt  # added: the loops below call dt.datetime.strptime (alias assumed from the source notebook)
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap="Blues", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['start_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['depart_time_hr_sin'] = hr_sin
df['depart_time_hr_cos'] = hr_cos
df['depart_time_min_sin'] = min_sin
df['depart_time_min_cos'] = min_cos

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['end_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['arrival_time_hr_sin'] = hr_sin
df['arrival_time_hr_cos'] = hr_cos
df['arrival_time_min_sin'] = min_sin
df['arrival_time_min_cos'] = min_cos

df.drop(['start_date'], axis=1, inplace=True)
df.drop(['end_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap="Greens", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

data = df.values
Y = data[:, 3]
X = np.delete(data, 3, 1)
x_train = X[:2223708]
y_train = Y[:2223708]
x_validation = X[2223708:2246398]
y_validation = Y[2223708:2246398]
x_test = X[2246398:]
y_test = Y[2246398:]

input_layer = Input((X.shape[1],))
y = Dense(64, kernel_initializer='he_normal', activation='tanh')(input_layer)
y = Dense(8, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='tanh')(y)
model = Model(inputs=input_layer, outputs=y)
model.compile(Nadam(), loss='mse')
model.summary()
code
16150211/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap='Blues', linewidths=0.5, fmt='.2f', ax=ax)
plt.show()
code
16150211/cell_33
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap="Blues", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

df.drop(['start_date'], axis=1, inplace=True)
df.drop(['end_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap='Greens', linewidths=0.5, fmt='.2f', ax=ax)
plt.show()
code
16150211/cell_44
[ "text_html_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Nadam
import datetime as dt  # added: the loops below call dt.datetime.strptime (alias assumed from the source notebook)
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap="Blues", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['start_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['depart_time_hr_sin'] = hr_sin
df['depart_time_hr_cos'] = hr_cos
df['depart_time_min_sin'] = min_sin
df['depart_time_min_cos'] = min_cos

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['end_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['arrival_time_hr_sin'] = hr_sin
df['arrival_time_hr_cos'] = hr_cos
df['arrival_time_min_sin'] = min_sin
df['arrival_time_min_cos'] = min_cos

df.drop(['start_date'], axis=1, inplace=True)
df.drop(['end_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap="Greens", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

data = df.values
Y = data[:, 3]
X = np.delete(data, 3, 1)
x_train = X[:2223708]
y_train = Y[:2223708]
x_validation = X[2223708:2246398]
y_validation = Y[2223708:2246398]
x_test = X[2246398:]
y_test = Y[2246398:]

input_layer = Input((X.shape[1],))
y = Dense(64, kernel_initializer='he_normal', activation='tanh')(input_layer)
y = Dense(8, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='tanh')(y)
model = Model(inputs=input_layer, outputs=y)
model.compile(Nadam(), loss='mse')
model.summary()

history = model.fit(x_train, y_train, validation_data=(x_validation, y_validation), epochs=100, batch_size=2048,
                    callbacks=[ModelCheckpoint('best_model.hdf5', monitor='val_loss', mode='min')])
code
16150211/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import datetime as dt  # added: the loops below call dt.datetime.strptime (alias assumed from the source notebook)
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap="Blues", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['start_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['depart_time_hr_sin'] = hr_sin
df['depart_time_hr_cos'] = hr_cos
df['depart_time_min_sin'] = min_sin
df['depart_time_min_cos'] = min_cos

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['end_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['arrival_time_hr_sin'] = hr_sin
df['arrival_time_hr_cos'] = hr_cos
df['arrival_time_min_sin'] = min_sin
df['arrival_time_min_cos'] = min_cos
code
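The cell above maps hour and minute onto sin/cos pairs so that times on either side of midnight stay close in feature space. A tiny standalone check of that property (encode_hour is our own helper, not from the notebook):

import math

def encode_hour(hr):
    # Same formula as the cell above: place the hour on the unit circle.
    return (math.sin(hr * (2.0 * math.pi / 24)), math.cos(hr * (2.0 * math.pi / 24)))

def dist(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])

# 23:00 and 01:00 are close; 01:00 and 12:00 are far, as intended.
print(dist(encode_hour(23), encode_hour(1)))   # ~0.52
print(dist(encode_hour(1), encode_hour(12)))   # ~1.98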
16150211/cell_48
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Nadam
from sklearn.preprocessing import MinMaxScaler
import datetime as dt  # added: the loops below call dt.datetime.strptime (alias assumed from the source notebook)
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

k = df['train_type'].unique()
l = [x for x in range(len(k))]
df['train_type'].replace(k, l, inplace=True)

k = df['train_class'].unique()
l = [x for x in range(len(k))]
df['train_class'].replace(k, l, inplace=True)

k = df['fare'].unique()
l = [x for x in range(len(k))]
df['fare'].replace(k, l, inplace=True)

f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap="Blues", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['start_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['depart_time_hr_sin'] = hr_sin
df['depart_time_hr_cos'] = hr_cos
df['depart_time_min_sin'] = min_sin
df['depart_time_min_cos'] = min_cos

hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['end_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['arrival_time_hr_sin'] = hr_sin
df['arrival_time_hr_cos'] = hr_cos
df['arrival_time_min_sin'] = min_sin
df['arrival_time_min_cos'] = min_cos

df.drop(['start_date'], axis=1, inplace=True)
df.drop(['end_date'], axis=1, inplace=True)

f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap="Greens", linewidths=.5, fmt='.2f', ax=ax)
plt.show()

places_sc = MinMaxScaler(copy=False)
train_type_sc = MinMaxScaler(copy=False)
train_class_sc = MinMaxScaler(copy=False)
fare_sc = MinMaxScaler(copy=False)
weekday_sc = MinMaxScaler(copy=False)
duration_sc = MinMaxScaler(copy=False)
price_sc = MinMaxScaler(copy=False)
df['origin'] = places_sc.fit_transform(df['origin'].values.reshape(-1, 1))
df['destination'] = places_sc.fit_transform(df['destination'].values.reshape(-1, 1))
df['train_type'] = train_type_sc.fit_transform(df['train_type'].values.reshape(-1, 1))
df['train_class'] = train_class_sc.fit_transform(df['train_class'].values.reshape(-1, 1))
df['fare'] = fare_sc.fit_transform(df['fare'].values.reshape(-1, 1))
df['start_weekday'] = weekday_sc.fit_transform(df['start_weekday'].values.reshape(-1, 1))
df['end_weekday'] = weekday_sc.fit_transform(df['end_weekday'].values.reshape(-1, 1))
df['duration'] = duration_sc.fit_transform(df['duration'].values.reshape(-1, 1))
df['price'] = price_sc.fit_transform(df['price'].values.reshape(-1, 1))

data = df.values
Y = data[:, 3]
X = np.delete(data, 3, 1)
x_train = X[:2223708]
y_train = Y[:2223708]
x_validation = X[2223708:2246398]
y_validation = Y[2223708:2246398]
x_test = X[2246398:]
y_test = Y[2246398:]

input_layer = Input((X.shape[1],))
y = Dense(64, kernel_initializer='he_normal', activation='tanh')(input_layer)
y = Dense(8, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='tanh')(y)
model = Model(inputs=input_layer, outputs=y)
model.compile(Nadam(), loss='mse')
model.summary()

history = model.fit(x_train, y_train, validation_data=(x_validation, y_validation), epochs=100, batch_size=2048,
                    callbacks=[ModelCheckpoint('best_model.hdf5', monitor='val_loss', mode='min')])

model.load_weights('best_model.hdf5')
scores = model.evaluate(x_test, y_test)
print('Test Set RMSE(before scaling ):', scores)

pred = model.predict(x_test)
y_test = y_test.reshape(22692, 1)
k = y_test - pred
k = price_sc.inverse_transform(k)
rmse = np.sqrt(np.mean(np.square(k)))
print('Test Set RMSE(after scaling) :', rmse)
code
16150211/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)
df['origin'].value_counts().plot(kind='bar')
code
16150211/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)

k = df['train_type'].unique()
l = [x for x in range(len(k))]
df['train_type'].replace(k, l, inplace=True)

k = df['train_class'].unique()
l = [x for x in range(len(k))]
df['train_class'].replace(k, l, inplace=True)

df['fare'].value_counts().plot(kind='bar')
k = df['fare'].unique()
l = [x for x in range(len(k))]
print('Numbers used to encode different fare classes:', l)
df['fare'].replace(k, l, inplace=True)
code
16150211/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import numpy as np import pickle import datetime import math import seaborn as sns from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt from keras.layers import Input, Dense from keras.models import Model from keras.optimizers import Nadam from keras.callbacks import ModelCheckpoint
code
16150211/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/renfe.csv') df.isna().sum() df.dropna(inplace=True) df.drop(['Unnamed: 0'], axis=1, inplace=True) df.drop(['insert_date'], axis=1, inplace=True) k = df['train_type'].unique() l = [x for x in range(len(k))] df['train_type'].replace(k, l, inplace=True) df['train_class'].value_counts().plot(kind='bar') k = df['train_class'].unique() l = [x for x in range(len(k))] print('Numbers used to encode different train classes:', l) df['train_class'].replace(k, l, inplace=True)
code
16150211/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/renfe.csv') df.isna().sum() df.dropna(inplace=True) df.drop(['Unnamed: 0'], axis=1, inplace=True) df.drop(['insert_date'], axis=1, inplace=True) df.head()
code
16150211/cell_38
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/renfe.csv') df.isna().sum() df.dropna(inplace=True) df.drop(['Unnamed: 0'], axis=1, inplace=True) df.drop(['insert_date'], axis=1, inplace=True) f,ax = plt.subplots(figsize=(6, 6)) sns.heatmap(df.corr(), annot=True, cmap = "Blues", linewidths=.5, fmt= '.2f',ax = ax) plt.show() df.drop(['start_date'], axis=1, inplace=True) df.drop(['end_date'], axis=1, inplace=True) f,ax = plt.subplots(figsize=(20, 20)) sns.heatmap(df.corr(), annot=True, cmap = "Greens", linewidths=.5, fmt= '.2f',ax = ax) plt.show() df.head()
code
16150211/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/renfe.csv') df.head()
code
16150211/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)
df['train_type'].value_counts().plot(kind='bar')
k = df['train_type'].unique()
l = [x for x in range(len(k))]
print('Numbers used to encode different train types:', l)
df['train_type'].replace(k, l, inplace=True)
code
16150211/cell_35
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/renfe.csv') df.isna().sum() df.dropna(inplace=True) df.drop(['Unnamed: 0'], axis=1, inplace=True) df.drop(['insert_date'], axis=1, inplace=True) f,ax = plt.subplots(figsize=(6, 6)) sns.heatmap(df.corr(), annot=True, cmap = "Blues", linewidths=.5, fmt= '.2f',ax = ax) plt.show() df.drop(['start_date'], axis=1, inplace=True) df.drop(['end_date'], axis=1, inplace=True) f,ax = plt.subplots(figsize=(20, 20)) sns.heatmap(df.corr(), annot=True, cmap = "Greens", linewidths=.5, fmt= '.2f',ax = ax) plt.show() df.head()
code
16150211/cell_46
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Nadam
import datetime as dt
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/renfe.csv')
df.isna().sum()
df.dropna(inplace=True)
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.drop(['insert_date'], axis=1, inplace=True)
f, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(df.corr(), annot=True, cmap='Blues', linewidths=0.5, fmt='.2f', ax=ax)
plt.show()
# Encode departure hour and minute as points on the unit circle so that
# 23:59 and 00:00 end up close together.
hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['start_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['depart_time_hr_sin'] = hr_sin
df['depart_time_hr_cos'] = hr_cos
df['depart_time_min_sin'] = min_sin
df['depart_time_min_cos'] = min_cos
# Same cyclical encoding for the arrival timestamp.
hr_cos = []
hr_sin = []
min_cos = []
min_sin = []
data = df['end_date'].values
for i in range(len(data)):
    time_obj = dt.datetime.strptime(data[i], '%Y-%m-%d %H:%M:%S')
    hr = time_obj.hour
    minute = time_obj.minute
    sample_hr_sin = math.sin(hr * (2.0 * math.pi / 24))
    sample_hr_cos = math.cos(hr * (2.0 * math.pi / 24))
    sample_min_sin = math.sin(minute * (2.0 * math.pi / 60))
    sample_min_cos = math.cos(minute * (2.0 * math.pi / 60))
    hr_cos.append(sample_hr_cos)
    hr_sin.append(sample_hr_sin)
    min_cos.append(sample_min_cos)
    min_sin.append(sample_min_sin)
df['arrival_time_hr_sin'] = hr_sin
df['arrival_time_hr_cos'] = hr_cos
df['arrival_time_min_sin'] = min_sin
df['arrival_time_min_cos'] = min_cos
df.drop(['start_date'], axis=1, inplace=True)
df.drop(['end_date'], axis=1, inplace=True)
f, ax = plt.subplots(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap='Greens', linewidths=0.5, fmt='.2f', ax=ax)
plt.show()
data = df.values
Y = data[:, 3]
X = np.delete(data, 3, 1)
x_train = X[:2223708]
y_train = Y[:2223708]
x_validation = X[2223708:2246398]
y_validation = Y[2223708:2246398]
x_test = X[2246398:]
y_test = Y[2246398:]
input_layer = Input((X.shape[1],))
y = Dense(64, kernel_initializer='he_normal', activation='tanh')(input_layer)
y = Dense(8, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='sigmoid')(y)
y = Dense(1, kernel_initializer='he_normal', activation='tanh')(y)
model = Model(inputs=input_layer, outputs=y)
model.compile(Nadam(), loss='mse')
model.summary()
history = model.fit(x_train, y_train, validation_data=(x_validation, y_validation), epochs=100, batch_size=2048, callbacks=[ModelCheckpoint('best_model.hdf5', monitor='val_loss', mode='min', save_best_only=True)])
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend()
plt.show()
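# A minimal sketch of what the sin/cos encoding above buys: on the unit circle
# 23:00 and 01:00 land next to each other, while the raw hour values 23 and 1
# are far apart. hour_to_circle is an illustrative helper, not part of the
# original notebook.
import math

def hour_to_circle(hr):
    angle = hr * 2.0 * math.pi / 24
    return (math.sin(angle), math.cos(angle))
for h in (1, 23):
    s, c = hour_to_circle(h)
    print(h, round(s, 3), round(c, 3))
# 1 -> (0.259, 0.966) and 23 -> (-0.259, 0.966): nearly the same point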
code
16150211/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/renfe.csv') df.isna().sum() df.dropna(inplace=True) df.drop(['Unnamed: 0'], axis=1, inplace=True) df.drop(['insert_date'], axis=1, inplace=True) df['destination'].value_counts().plot(kind='bar')
code
16150211/cell_5
[ "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/renfe.csv') df.isna().sum()
code
90127400/cell_13
[ "text_plain_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_train = df_train.drop('id', axis=1) df_train = df_train.drop('language', axis=1) df_test = df_test.drop('language', axis=1) df_test.head()
code
90127400/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_train = df_train.drop('id', axis=1) df_train.head()
code
90127400/cell_6
[ "text_html_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_train.info()
code
90127400/cell_2
[ "text_html_output_1.png" ]
!nvidia-smi
code
90127400/cell_1
[ "text_plain_output_1.png" ]
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90127400/cell_7
[ "text_html_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') print(df_test)
code
90127400/cell_8
[ "text_html_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_train.describe(include='all')
code
90127400/cell_14
[ "text_plain_output_1.png" ]
import cudf as pd import seaborn as sns df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_train = df_train.drop('id', axis=1) df_train = df_train.drop('language', axis=1) df_test = df_test.drop('language', axis=1) sns.countplot(x='lang_abv', data=df_train)
code
90127400/cell_10
[ "text_plain_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_test.describe(include='all')
code
90127400/cell_12
[ "text_plain_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') df_train = df_train.drop('id', axis=1) df_train = df_train.drop('language', axis=1) df_test = df_test.drop('language', axis=1) df_train.head()
code
90127400/cell_5
[ "text_html_output_1.png" ]
import cudf as pd df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv') df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv') print(df_train)
code
1009478/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1) train_data.describe()
code
1009478/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
plt.hist(train_data['Pclass'], color='lightblue')
# tick_params expects booleans; the 'on'/'off' string form is deprecated.
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlabel('Pclass')
plt.ylabel('Count')
plt.grid(True)
plt.tight_layout()
code
1009478/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1009478/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1) train_data.head()
code
128038775/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd plt.style.use('fivethirtyeight') plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams.update({'font.size': 14}) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) pd.options.display.float_format = '{:.4f}'.format META_FILE = '../input/crowd-counting/labels.csv' data = pd.read_csv(META_FILE) data.head()
code
128038775/cell_26
[ "image_output_1.png" ]
from PIL.ImageDraw import Draw
import PIL
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
draw_boxes(example_path, results, threshold=0.5)
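# A minimal sketch of the masking idea inside count_persons: class 1 is
# 'person' in the COCO label map used by EfficientDet, and a boolean mask keeps
# only confident person detections. The toy arrays below are assumptions.
import numpy as np
toy_classes = np.array([1, 1, 3, 1])
toy_scores = np.array([0.9, 0.2, 0.8, 0.6])
mask = (toy_classes == 1) & (toy_scores > 0.5)
print(mask.sum())  # 2 person detections clear the threshold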
code
128038775/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
data.head()
code
128038775/cell_32
[ "image_output_1.png" ]
from PIL.ImageDraw import Draw
import PIL
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].min(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].max(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
draw_boxes(example_path, results, threshold=0.25)
code
128038775/cell_28
[ "image_output_1.png" ]
from PIL.ImageDraw import Draw
import PIL
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
draw_boxes(example_path, results, threshold=0.25)
code
128038775/cell_35
[ "image_output_1.png" ]
from PIL.ImageDraw import Draw
from tqdm import tqdm
import PIL
import concurrent.futures
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import time
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].min(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].max(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
sample = data.sample(frac=0.1)
start = time.perf_counter()
objects = []
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(count_persons, path, detector, 0.25) for path in sample['path']]
    for f in tqdm(concurrent.futures.as_completed(futures)):
        objects.append(f.result())
finish = time.perf_counter()
print(f'Finished in {round(finish - start, 2)} second(s).')
code
128038775/cell_31
[ "image_output_1.png" ]
from PIL.ImageDraw import Draw
import PIL
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].min(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
draw_boxes(example_path, results, threshold=0.25)
code
128038775/cell_24
[ "image_output_1.png" ]
from PIL.ImageDraw import Draw
import PIL
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
draw_boxes(example_path, results)
code
128038775/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
plt.hist(data['count'], bins=20)
plt.axvline(data.describe().loc['mean', 'count'], label='Mean value', color='green')
plt.legend()
plt.xlabel('Number of people')
plt.ylabel('Frequency')
plt.title('Target Values')
plt.show()
code
128038775/cell_37
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from PIL.ImageDraw import Draw
from tqdm import tqdm
import PIL
import concurrent.futures
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import time
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
MODEL_PATH = 'https://tfhub.dev/tensorflow/efficientdet/d0/1'
detector = hub.load(MODEL_PATH)

def detect_objects(path, model):
    """Read the image from file, add a batch axis, and run the model on it."""
    image_tensor = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)[tf.newaxis, ...]
    return model(image_tensor)

def count_persons(path, model, threshold=0.0):
    """Count the number of people detected in the image (class 1 is 'person')."""
    results = detect_objects(path, model)
    return (results['detection_classes'].numpy()[0] == 1)[np.where(results['detection_scores'].numpy()[0] > threshold)].sum()

def draw_boxes(image_path, data, threshold=0.0):
    """Return the image with a rectangle drawn over each detected person."""
    image = PIL.Image.open(image_path)
    draw = Draw(image)
    im_width, im_height = image.size
    boxes = data['detection_boxes'].numpy()[0]
    classes = data['detection_classes'].numpy()[0]
    scores = data['detection_scores'].numpy()[0]
    for i in range(int(data['num_detections'][0])):
        if classes[i] == 1 and scores[i] > threshold:
            ymin, xmin, ymax, xmax = boxes[i]
            left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
            draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=4, fill='red')
    return image
example_path = '../input/crowd-counting/frames/frames/seq_000010.jpg'
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].min(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
example_path = data.loc[data['count'] == data['count'].max(), 'path'].iloc[0]
results = detect_objects(example_path, detector)
sample = data.sample(frac=0.1)
start = time.perf_counter()
objects = []
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(count_persons, path, detector, 0.25) for path in sample['path']]
    # Collect results in submission order so they line up with sample's rows;
    # as_completed would yield them in completion order instead.
    for f in tqdm(futures):
        objects.append(f.result())
finish = time.perf_counter()
sample['prediction'] = objects
sample.head(10)
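# A minimal sketch of the ordering guarantee relied on above: iterating the
# submitted futures (or using executor.map) preserves input order, whereas
# as_completed yields tasks as they finish. slow_square is an illustrative toy
# function, not part of the original notebook.
import concurrent.futures
import time

def slow_square(x):
    time.sleep(0.1 * (3 - x))  # later inputs finish sooner on purpose
    return x * x
with concurrent.futures.ThreadPoolExecutor() as ex:
    print(list(ex.map(slow_square, [1, 2, 3])))  # [1, 4, 9]: input order kept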
code
128038775/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.options.display.float_format = '{:.4f}'.format
META_FILE = '../input/crowd-counting/labels.csv'
data = pd.read_csv(META_FILE)

def reconstruct_path(image_id):
    """Convert a numeric image ID into a relative file path."""
    image_id = str(image_id).rjust(6, '0')
    return f'../input/crowd-counting/frames/frames/seq_{image_id}.jpg'
data['path'] = data['id'].apply(reconstruct_path)
data.describe()
code
2012216/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('../input/mushrooms.csv') data_df.info()
code
2012216/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
columns = [c for c in data_df.columns if c not in ('class', 'y')]
stats_df = []
single_val_c = {}
for i, c in enumerate(columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    s = s.values / s.sum()
    df = pd.DataFrame(columns=['col', 'val', 'mean_y', 'percent'], index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['mean_y'] = m.values
    df['percent'] = s
    stats_df.append(df)
stats_df = pd.concat(stats_df, axis=0)
for c in single_val_c.keys():
    print('The column %s has only one unique value: %r' % (c, single_val_c[c]))
code
2012216/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
columns = [c for c in data_df.columns if c not in ('class', 'y')]
stats_df = []
single_val_c = {}
for i, c in enumerate(columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    s = s.values / s.sum()
    df = pd.DataFrame(columns=['col', 'val', 'mean_y', 'percent'], index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['mean_y'] = m.values
    df['percent'] = s
    stats_df.append(df)
stats_df = pd.concat(stats_df, axis=0)
stats_df.head()
code
2012216/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
np.set_printoptions(suppress=True, linewidth=300)
pd.options.display.float_format = lambda x: '%0.6f' % x
print(check_output(['ls', '../input']).decode('utf-8'))
code
2012216/cell_5
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_21.png", "image_output_7.png", "image_output_20.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data_df = pd.read_csv('../input/mushrooms.csv')
data_df['y'] = data_df['class'].map({'p': 1, 'e': 0})
columns = [c for c in data_df.columns if c not in ('class', 'y')]
stats_df = []
single_val_c = {}
for i, c in enumerate(columns):
    if data_df[c].nunique() == 1:
        single_val_c[c] = data_df[c].unique()[0]
        continue
    gb = data_df.groupby(c)
    m = gb['y'].mean()
    s = gb.size()
    s = s.values / s.sum()
    df = pd.DataFrame(columns=['col', 'val', 'mean_y', 'percent'], index=range(len(m)))
    df['col'] = c
    df['val'] = m.index.values
    df['mean_y'] = m.values
    df['percent'] = s
    stats_df.append(df)
    sns.barplot(x=m.index, y=m)
    plt.show()
stats_df = pd.concat(stats_df, axis=0)
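# A minimal sketch of the per-category statistics computed above, on a toy
# frame: the groupby mean of the binary target gives the poisonous rate per
# category value, and size gives the category frequency. 'cap' is a toy column
# name, not one from the mushrooms dataset.
import pandas as pd
toy = pd.DataFrame({'cap': ['x', 'x', 'y', 'y'], 'y': [1, 0, 1, 1]})
print(toy.groupby('cap')['y'].mean())  # x: 0.5, y: 1.0
print(toy.groupby('cap').size() / len(toy))  # x: 0.5, y: 0.5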
code
89136278/cell_6
[ "image_output_1.png" ]
!dir
code
89136278/cell_18
[ "text_plain_output_1.png" ]
import torch import torchvision.models as models device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cnn = models.vgg19(pretrained=True).features.to(device).eval()
code
89136278/cell_28
[ "image_output_2.png", "image_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
imsize = (512, 220) if torch.cuda.is_available() else (128, 220)
loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])

def image_loader(image_name):
    image = Image.open(image_name)
    image = loader(image).unsqueeze(0)  # add a batch dimension
    return image.to(device, torch.float)
style_img = image_loader('./style.jpg')
content_img = image_loader('./content.jpg')
assert style_img.size() == content_img.size(), 'we need to import style and content images of the same size'
unloader = transforms.ToPILImage()
plt.ion()

def imshow(tensor, title=None):
    image = tensor.cpu().clone()
    image = image.squeeze(0)  # drop the batch dimension
    image = unloader(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)

class ContentLoss(nn.Module):

    def __init__(self, target):
        super(ContentLoss, self).__init__()
        self.target = target.detach()

    def forward(self, input):
        self.loss = F.mse_loss(input, self.target)
        return input

def gram_matrix(input):
    a, b, c, d = input.size()
    features = input.view(a * b, c * d)
    G = torch.mm(features, features.t())
    return G.div(a * b * c * d)

class StyleLoss(nn.Module):

    def __init__(self, target_feature):
        super(StyleLoss, self).__init__()
        self.target = gram_matrix(target_feature).detach()

    def forward(self, input):
        G = gram_matrix(input)
        self.loss = F.mse_loss(G, self.target)
        return input
cnn = models.vgg19(pretrained=True).features.to(device).eval()
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)

class Normalization(nn.Module):

    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        return (img - self.mean) / self.std
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']

def get_style_model_and_losses(cnn, normalization_mean, normalization_std, style_img, content_img, content_layers=content_layers_default, style_layers=style_layers_default):
    normalization = Normalization(normalization_mean, normalization_std).to(device)
    content_losses = []
    style_losses = []
    model = nn.Sequential(normalization)
    i = 0
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
        model.add_module(name, layer)
        if name in content_layers:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module('content_loss_{}'.format(i), content_loss)
            content_losses.append(content_loss)
        if name in style_layers:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module('style_loss_{}'.format(i), style_loss)
            style_losses.append(style_loss)
    # Trim everything after the last loss layer.
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
            break
    model = model[:i + 1]
    return (model, style_losses, content_losses)
input_img = content_img.clone()

def get_input_optimizer(input_img):
    optimizer = optim.LBFGS([input_img])
    return optimizer

def run_style_transfer(cnn, normalization_mean, normalization_std, content_img, style_img, input_img, num_steps=300, style_weight=1000000, content_weight=1):
    """Run the style transfer."""
    model, style_losses, content_losses = get_style_model_and_losses(cnn, normalization_mean, normalization_std, style_img, content_img)
    input_img.requires_grad_(True)
    model.requires_grad_(False)
    optimizer = get_input_optimizer(input_img)
    run = [0]
    while run[0] <= num_steps:

        def closure():
            with torch.no_grad():
                input_img.clamp_(0, 1)
            optimizer.zero_grad()
            model(input_img)
            style_score = 0
            content_score = 0
            for sl in style_losses:
                style_score += sl.loss
            for cl in content_losses:
                content_score += cl.loss
            style_score *= style_weight
            content_score *= content_weight
            loss = style_score + content_score
            loss.backward()
            run[0] += 1
            return style_score + content_score
        optimizer.step(closure)
    with torch.no_grad():
        input_img.clamp_(0, 1)
    return input_img
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std, content_img, style_img, input_img)
plt.figure()
imshow(output, title='Output Image')
plt.ioff()
plt.show()
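# A minimal sketch of what gram_matrix (defined above) computes: normalized
# channel-to-channel correlations of a feature map, here for a tiny 1x2x2x2
# tensor chosen purely for illustration.
feat = torch.tensor([[[[1.0, 0.0], [0.0, 1.0]],
                      [[1.0, 1.0], [0.0, 0.0]]]])
print(gram_matrix(feat))
# tensor([[0.2500, 0.1250],
#         [0.1250, 0.2500]]): diagonal = channel energy, off-diagonal = overlap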
code
89136278/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image import torch import torchvision.transforms as transforms device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') imsize = (512, 220) if torch.cuda.is_available() else (128, 220) loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()]) def image_loader(image_name): image = Image.open(image_name) image = loader(image).unsqueeze(0) return image.to(device, torch.float) style_img = image_loader('./style.jpg') content_img = image_loader('./content.jpg') print(style_img.size(), content_img.size()) assert style_img.size() == content_img.size(), 'we need to import style and content images of the same size'
code
89136278/cell_22
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
imsize = (512, 220) if torch.cuda.is_available() else (128, 220)
loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])

def image_loader(image_name):
    image = Image.open(image_name)
    image = loader(image).unsqueeze(0)  # add a batch dimension
    return image.to(device, torch.float)
style_img = image_loader('./style.jpg')
content_img = image_loader('./content.jpg')
assert style_img.size() == content_img.size(), 'we need to import style and content images of the same size'
unloader = transforms.ToPILImage()
plt.ion()

def imshow(tensor, title=None):
    image = tensor.cpu().clone()
    image = image.squeeze(0)  # drop the batch dimension
    image = unloader(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)
input_img = content_img.clone()
plt.figure()
imshow(input_img, title='Input Image')
code