Dataset schema:
  path (stringlengths: 13 to 17)
  screenshot_names (sequencelengths: 1 to 873)
  code (stringlengths: 0 to 40.4k)
  cell_type (stringclasses: 1 value)
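Read as a table, the schema above describes one record per notebook cell: a path of the form <kernel_id>/cell_<n>, a list of rendered output screenshots, the cell's source code, and a cell_type that is always "code". A minimal sketch of iterating over such records, assuming a JSON-lines serialization (the filename cells.jsonl is hypothetical):

import json

# Hypothetical file name; the on-disk serialization of this dump is an assumption.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        # Fields follow the schema above; cell_type has a single class.
        assert record['cell_type'] == 'code'
        print(record['path'], len(record['screenshot_names']), len(record['code']))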
2043738/cell_3
[ "text_plain_output_1.png" ]
import calendar

year = 2018
month = 1
for i in range(12):
    print(calendar.month(year, month))
    month += 1
code
32068018/cell_9
[ "image_output_1.png" ]
import csv
import numpy as np

ROOT_PATH = '/kaggle/input/CORD-19-research-challenge/'
METADATA_PATH = f'{ROOT_PATH}/metadata.csv'
SAMPLE_SIZE = None

def create_embedding_dict(filepath, sample_size=None):
    """function to create embedding dictionary using given file"""
    embedding_dict = {}
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for i, row in enumerate(reader):
            if i == sample_size:
                break
            embed = np.zeros((768,))
            for idx, val in enumerate(row):
                if idx > 0:
                    embed[idx - 1] = float(val)
            embedding_dict[row[0]] = embed
    return embedding_dict

embedding_dict = create_embedding_dict(f'{ROOT_PATH}/cord19_specter_embeddings_2020-04-10/cord19_specter_embeddings_2020-04-10.csv', sample_size=SAMPLE_SIZE)
embedding_mat = np.array(list(embedding_dict.values()))
(embedding_mat.shape, len(embedding_dict))
code
32068018/cell_29
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
import csv
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import skfuzzy as fuzz

ROOT_PATH = '/kaggle/input/CORD-19-research-challenge/'
METADATA_PATH = f'{ROOT_PATH}/metadata.csv'
SAMPLE_SIZE = None

def create_embedding_dict(filepath, sample_size=None):
    """function to create embedding dictionary using given file"""
    embedding_dict = {}
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for i, row in enumerate(reader):
            if i == sample_size:
                break
            embed = np.zeros((768,))
            for idx, val in enumerate(row):
                if idx > 0:
                    embed[idx - 1] = float(val)
            embedding_dict[row[0]] = embed
    return embedding_dict

embedding_dict = create_embedding_dict(f'{ROOT_PATH}/cord19_specter_embeddings_2020-04-10/cord19_specter_embeddings_2020-04-10.csv', sample_size=SAMPLE_SIZE)
embedding_mat = np.array(list(embedding_dict.values()))
(embedding_mat.shape, len(embedding_dict))
n_clusters = 10

def fuzzy_clustering(all_embedding, n_clusters):
    centroids, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(data=all_embedding.T, c=n_clusters, m=2, error=0.5, maxiter=100, init=None)
    clusters = np.argmax(u, axis=0)
    return (clusters, centroids)

def get_pca(all_embedding):
    pca = PCA()
    pca_result = pca.fit_transform(all_embedding)
    return pca_result

def plot_pca(pca_result, clusters, title):
    sns.set(rc={'figure.figsize': (10, 10)})
    palette = sns.color_palette('bright', len(set(clusters)))

kmeans_pca = get_pca(embedding_mat)
# kmeans_clusters is produced by an earlier notebook cell that is not part of this extract.
plot_pca(kmeans_pca, kmeans_clusters, 'PCA Covid-19 Articles - Clustered(kmeans)')
code
32068018/cell_2
[ "text_html_output_1.png" ]
# install required packages that are not available in the given environment
!pip install scikit-fuzzy
code
32068018/cell_19
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
import csv
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import skfuzzy as fuzz

ROOT_PATH = '/kaggle/input/CORD-19-research-challenge/'
METADATA_PATH = f'{ROOT_PATH}/metadata.csv'
SAMPLE_SIZE = None

def create_embedding_dict(filepath, sample_size=None):
    """function to create embedding dictionary using given file"""
    embedding_dict = {}
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for i, row in enumerate(reader):
            if i == sample_size:
                break
            embed = np.zeros((768,))
            for idx, val in enumerate(row):
                if idx > 0:
                    embed[idx - 1] = float(val)
            embedding_dict[row[0]] = embed
    return embedding_dict

embedding_dict = create_embedding_dict(f'{ROOT_PATH}/cord19_specter_embeddings_2020-04-10/cord19_specter_embeddings_2020-04-10.csv', sample_size=SAMPLE_SIZE)
embedding_mat = np.array(list(embedding_dict.values()))
(embedding_mat.shape, len(embedding_dict))
n_clusters = 10

def fuzzy_clustering(all_embedding, n_clusters):
    centroids, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(data=all_embedding.T, c=n_clusters, m=2, error=0.5, maxiter=100, init=None)
    clusters = np.argmax(u, axis=0)
    return (clusters, centroids)

def get_pca(all_embedding):
    pca = PCA()
    pca_result = pca.fit_transform(all_embedding)
    return pca_result

def plot_pca(pca_result, clusters, title):
    sns.set(rc={'figure.figsize': (10, 10)})
    palette = sns.color_palette('bright', len(set(clusters)))

fuzzy_pca = get_pca(embedding_mat)
# fuzzy_clusters is produced by an earlier notebook cell (e.g. via fuzzy_clustering) that is not part of this extract.
plot_pca(fuzzy_pca, fuzzy_clusters, 'PCA Covid-19 Articles - Clustered(Fuzzy C-Means)')
code
32068018/cell_5
[ "image_output_1.png" ]
import pandas as pd

ROOT_PATH = '/kaggle/input/CORD-19-research-challenge/'
METADATA_PATH = f'{ROOT_PATH}/metadata.csv'
SAMPLE_SIZE = None
meta_df = pd.read_csv(METADATA_PATH, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
meta_df.head()
code
16123280/cell_21
[ "text_plain_output_1.png" ]
size_list = [i for i in range(1, 10)]
size_list
code
16123280/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data['DIGITO'].loc[data['DIGITO'] == '0'].count()
code
16123280/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
new_num_address = num_address['DIGITO'].astype(str)
print(type(new_num_address))
code
16123280/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.info()
code
16123280/cell_23
[ "text_plain_output_1.png" ]
import math
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # seaborn to create graphics

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
array = [float(i) for i in range(1, 10)]
percent_bf = [math.log10(1 + 1 / d) for d in array]
percent_bf
fd_teorico = []
for i in range(len(percent_bf)):
    fd_teorico.append(len(num_address) * percent_bf[i])
fd_teorico
fd_plot = []
for i in range(len(percent_bf)):
    fd_plot.append(fd_teorico[i] / len(num_address))
size_list = [i for i in range(1, 10)]
size_list
sns.barplot(x=size_list, y=fd_plot)
code
16123280/cell_20
[ "text_plain_output_1.png" ]
import math
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
array = [float(i) for i in range(1, 10)]
percent_bf = [math.log10(1 + 1 / d) for d in array]
percent_bf
fd_teorico = []
for i in range(len(percent_bf)):
    fd_teorico.append(len(num_address) * percent_bf[i])
fd_teorico
fd_plot = []
for i in range(len(percent_bf)):
    fd_plot.append(fd_teorico[i] / len(num_address))
print(fd_plot)
code
16123280/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
new_num_address = num_address['DIGITO'].astype(str)
new_num_address.loc[new_num_address == '0'].count()
code
16123280/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.head()
code
16123280/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
code
16123280/cell_19
[ "text_plain_output_1.png" ]
import math
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
array = [float(i) for i in range(1, 10)]
percent_bf = [math.log10(1 + 1 / d) for d in array]
percent_bf
fd_teorico = []
for i in range(len(percent_bf)):
    fd_teorico.append(len(num_address) * percent_bf[i])
fd_teorico
code
16123280/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt

print(os.listdir('../input'))
code
16123280/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
data.head()
code
16123280/cell_18
[ "text_plain_output_1.png" ]
import math

array = [float(i) for i in range(1, 10)]
percent_bf = [math.log10(1 + 1 / d) for d in array]
percent_bf
code
16123280/cell_8
[ "text_plain_output_1.png" ]
[str(x) for x in range(10)]
code
16123280/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
code
16123280/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
num_address['DIGITO'].head()
code
16123280/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.STREET.value_counts().head()
code
16123280/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
len(num_address)
code
16123280/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
type(num_address)
code
16123280/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
code
16123280/cell_22
[ "text_plain_output_1.png" ]
import math
import matplotlib.pylab as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
data.shape
num_address = data[data['DIGITO'] != '0']
num_address.shape
array = [float(i) for i in range(1, 10)]
percent_bf = [math.log10(1 + 1 / d) for d in array]
percent_bf
fd_teorico = []
for i in range(len(percent_bf)):
    fd_teorico.append(len(num_address) * percent_bf[i])
fd_teorico
fd_plot = []
for i in range(len(percent_bf)):
    fd_plot.append(fd_teorico[i] / len(num_address))
size_list = [i for i in range(1, 10)]
size_list
plt.bar(size_list, fd_plot)
plt.show()
code
16123280/cell_10
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
for num in [str(x) for x in range(10)]:
    print(num, data[data['DIGITO'] == num].shape[0], data[data['DIGITO'] == num].shape[0] / data.shape[0])
code
16123280/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data.NUMBER = data.NUMBER.astype(str)
data.NUMBER = data.NUMBER.str.strip(' ')
data['DIGITO'] = data.NUMBER.map(lambda x: x[0])
index_drop = data[data.DIGITO == '0'].index.values
index_drop
data.drop(index_drop, axis=0, inplace=True)
experemental_values = data.DIGITO.value_counts()
experemental_values
data[data.DIGITO == '1'].shape[0]
code
16123280/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/argentina.csv')
data['NUMBER'].head()
code
2034808/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output

import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss

print(check_output(['ls', '../input']).decode('utf8'))
code
2034808/cell_7
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sampleSubmission = pd.read_csv('../input/sample_submission.csv')
col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
trainTxt = train['comment_text']
testTxt = test['comment_text']
trainTxt = trainTxt.fillna('unknown')
testTxt = testTxt.fillna('unknown')
combinedTxt = pd.concat([trainTxt, testTxt], axis=0)
vect = TfidfVectorizer(decode_error='ignore', use_idf=True, smooth_idf=True, min_df=10, ngram_range=(1, 3), lowercase=True, stop_words='english')
combinedDtm = vect.fit_transform(combinedTxt)
trainDtm = combinedDtm[:train.shape[0]]
testDtm = vect.transform(testTxt)
preds = np.zeros((test.shape[0], len(col)))
loss = []
for i, j in enumerate(col):
    lr = LogisticRegression(C=4)
    lr.fit(trainDtm, train[j])
    preds[:, i] = lr.predict_proba(testDtm)[:, 1]
    train_preds = lr.predict_proba(trainDtm)[:, 1]
    loss.append(log_loss(train[j], train_preds))
loss
code
18146642/cell_4
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv')
labels = data['target'].values
data = data.drop(['id', 'target'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
clf = LogisticRegression(solver='saga', multi_class='multinomial')
clf.fit(X_train, y_train)
code
18146642/cell_6
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv')
labels = data['target'].values
data = data.drop(['id', 'target'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
clf = LogisticRegression(solver='saga', multi_class='multinomial')
clf.fit(X_train, y_train)

def evaluate(pred, labels):
    return sum(pred == labels) / len(pred)

pred = clf.predict(X_test)
print('Accuracy:', evaluate(pred, y_test))
code
18146642/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18146642/cell_7
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv')
labels = data['target'].values
data = data.drop(['id', 'target'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
rfc = RandomForestClassifier(n_jobs=-1, n_estimators=5)
rfc.fit(X_train, y_train)
code
18146642/cell_8
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv')
labels = data['target'].values
data = data.drop(['id', 'target'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
clf = LogisticRegression(solver='saga', multi_class='multinomial')
clf.fit(X_train, y_train)

def evaluate(pred, labels):
    return sum(pred == labels) / len(pred)

pred = clf.predict(X_test)
rfc = RandomForestClassifier(n_jobs=-1, n_estimators=5)
rfc.fit(X_train, y_train)
pred = rfc.predict(X_test)
print('Accuracy:', evaluate(pred, y_test))
code
18146642/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv')
labels = data['target'].values
data = data.drop(['id', 'target'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
code
18146642/cell_10
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/train.csv')
labels = data['target'].values
data = data.drop(['id', 'target'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
clf = LogisticRegression(solver='saga', multi_class='multinomial')
clf.fit(X_train, y_train)

def evaluate(pred, labels):
    return sum(pred == labels) / len(pred)

pred = clf.predict(X_test)
rfc = RandomForestClassifier(n_jobs=-1, n_estimators=5)
rfc.fit(X_train, y_train)
pred = rfc.predict(X_test)
unknown = pd.read_csv('../input/test.csv')
ids = unknown['id']
unknown = unknown.drop(['id'], axis=1).values
pred = rfc.predict(unknown)
out = pd.DataFrame(columns=['Id', 'Prediction'])
out['Id'] = ids
out['Prediction'] = pred
print(out.info())
out.head(10)
code
18131067/cell_13
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
list_opening_moves = ['e4', 'd4', 'Nf', 'other']
opening_moves = {}
for move in list_opening_moves:
    if move != 'other':
        opening_moves[move] = df_games[df_games['moves'].str.slice(0, 2, 1) == move].count()['moves']
    else:
        opening_moves[move] = df_games[~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
print(opening_moves)
code
18131067/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
print(df_games.columns)
code
18131067/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
print(df_games.columns)
code
18131067/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
colors = ['blue', 'orange']
plt.pie([minis, games - minis], labels=['Minis', 'Not Minis'], colors=colors, startangle=90, autopct='%.1f%%')
plt.show()
code
18131067/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
ends = df_games['winner'].unique()
finish_by_control = {}
control_totals = {}
for control in game_types:
    finish_by_control[control] = {}
    control_totals[control] = 0
    for end in ends:
        finish_by_control[control][end] = df_games[(df_games['winner'] == end) & (df_games['time_control_type'] == control)].count()['winner']
        control_totals[control] = finish_by_control[control][end] + control_totals[control]
end_percents = {}
for control in game_types:
    end_percents[control] = {}
    for end in ends:
        end_percents[control][end] = round(finish_by_control[control][end] / control_totals[control] * 100, 2)
end_percents
code
18131067/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
18131067/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
print(temp_df.head(10))
code
18131067/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
ends = df_games['winner'].unique()
finish_by_control = {}
control_totals = {}
for control in game_types:
    finish_by_control[control] = {}
    control_totals[control] = 0
    for end in ends:
        finish_by_control[control][end] = df_games[(df_games['winner'] == end) & (df_games['time_control_type'] == control)].count()['winner']
        control_totals[control] = finish_by_control[control][end] + control_totals[control]
list_opening_moves = ['e4', 'd4', 'Nf', 'other']
opening_moves = {}
for move in list_opening_moves:
    if move != 'other':
        opening_moves[move] = df_games[df_games['moves'].str.slice(0, 2, 1) == move].count()['moves']
    else:
        opening_moves[move] = df_games[~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
result_by_opening_move = {}
for opening in list_opening_moves:
    result_by_opening_move[opening] = {}
    for end in ends:
        if opening != 'other':
            result_by_opening_move[opening][end] = df_games[(df_games['winner'] == end) & (df_games['moves'].str.slice(0, 2, 1) == opening)].count()['moves']
        else:
            result_by_opening_move[opening][end] = df_games[(df_games['winner'] == end) & ~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
result_by_opening_move
move_percentage = {}
for move in list_opening_moves:
    move_percentage[move] = {}
    for end in ends:
        move_percentage[move][end] = round(result_by_opening_move[move][end] / opening_moves[move] * 100, 2)
move_percentage
code
18131067/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
colors = ['blue', 'orange']
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
ends = df_games['winner'].unique()
finish_by_control = {}
control_totals = {}
for control in game_types:
    finish_by_control[control] = {}
    control_totals[control] = 0
    for end in ends:
        finish_by_control[control][end] = df_games[(df_games['winner'] == end) & (df_games['time_control_type'] == control)].count()['winner']
        control_totals[control] = finish_by_control[control][end] + control_totals[control]
end_percents = {}
for control in game_types:
    end_percents[control] = {}
    for end in ends:
        end_percents[control][end] = round(finish_by_control[control][end] / control_totals[control] * 100, 2)
end_percents
# graphing the win rates grouped by time control
# bar widths and positions
bar_width = 0.25
bpos = np.arange(len(end_percents.keys()))
# create the bars!
fig, ax = plt.subplots()
white = ax.bar(bpos - bar_width, [end_percents[control]['white'] for control in game_types], bar_width, label='White')
black = ax.bar(bpos, [end_percents[control]['black'] for control in game_types], bar_width, label='Black')
draws = ax.bar(bpos + bar_width, [end_percents[control]['draw'] for control in game_types], bar_width, label='Draw')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Win Percentage')
ax.set_title('Win Percentage by Control Type')
ax.set_xticks(bpos)
ax.set_xticklabels([x.capitalize() for x in game_types])
ax.legend()
fig.tight_layout()
plt.show()
list_opening_moves = ['e4', 'd4', 'Nf', 'other']
opening_moves = {}
for move in list_opening_moves:
    if move != 'other':
        opening_moves[move] = df_games[df_games['moves'].str.slice(0, 2, 1) == move].count()['moves']
    else:
        opening_moves[move] = df_games[~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
result_by_opening_move = {}
for opening in list_opening_moves:
    result_by_opening_move[opening] = {}
    for end in ends:
        if opening != 'other':
            result_by_opening_move[opening][end] = df_games[(df_games['winner'] == end) & (df_games['moves'].str.slice(0, 2, 1) == opening)].count()['moves']
        else:
            result_by_opening_move[opening][end] = df_games[(df_games['winner'] == end) & ~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
result_by_opening_move
move_percentage = {}
for move in list_opening_moves:
    move_percentage[move] = {}
    for end in ends:
        move_percentage[move][end] = round(result_by_opening_move[move][end] / opening_moves[move] * 100, 2)
move_percentage
bar_width = 0.25
bpos = np.arange(len(move_percentage.keys()))
fig, ax = plt.subplots()
white = ax.bar(bpos - bar_width, [move_percentage[move]['white'] for move in list_opening_moves], bar_width, label='White')
black = ax.bar(bpos, [move_percentage[move]['black'] for move in list_opening_moves], bar_width, label='Black')
draws = ax.bar(bpos + bar_width, [move_percentage[move]['draw'] for move in list_opening_moves], bar_width, label='Draw')
ax.set_ylabel('Win Percentage')
ax.set_title('Win Percentage by Move')
ax.set_xticks(bpos)
ax.set_xticklabels(['King Pawn', 'Queen Pawn', 'Reti', 'Other'])
ax.legend()
fig.tight_layout()
plt.show()
code
18131067/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
print(df_games.columns)
code
18131067/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
ends = df_games['winner'].unique()
finish_by_control = {}
control_totals = {}
for control in game_types:
    finish_by_control[control] = {}
    control_totals[control] = 0
    for end in ends:
        finish_by_control[control][end] = df_games[(df_games['winner'] == end) & (df_games['time_control_type'] == control)].count()['winner']
        control_totals[control] = finish_by_control[control][end] + control_totals[control]
list_opening_moves = ['e4', 'd4', 'Nf', 'other']
opening_moves = {}
for move in list_opening_moves:
    if move != 'other':
        opening_moves[move] = df_games[df_games['moves'].str.slice(0, 2, 1) == move].count()['moves']
    else:
        opening_moves[move] = df_games[~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
result_by_opening_move = {}
for opening in list_opening_moves:
    result_by_opening_move[opening] = {}
    for end in ends:
        if opening != 'other':
            result_by_opening_move[opening][end] = df_games[(df_games['winner'] == end) & (df_games['moves'].str.slice(0, 2, 1) == opening)].count()['moves']
        else:
            result_by_opening_move[opening][end] = df_games[(df_games['winner'] == end) & ~df_games['moves'].str.slice(0, 2, 1).isin(list_opening_moves)].count()['moves']
result_by_opening_move
code
18131067/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
ends = df_games['winner'].unique()
finish_by_control = {}
control_totals = {}
for control in game_types:
    finish_by_control[control] = {}
    control_totals[control] = 0
    for end in ends:
        finish_by_control[control][end] = df_games[(df_games['winner'] == end) & (df_games['time_control_type'] == control)].count()['winner']
        control_totals[control] = finish_by_control[control][end] + control_totals[control]
print(finish_by_control)
print(control_totals)
code
18131067/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
colors = ['blue', 'orange']
time_control_types = {'classic': {'start': 60, 'end': 999}, 'rapid': {'start': 11, 'end': 59}, 'blitz': {'start': 5, 'end': 10}, 'bullet': {'start': 0, 'end': 4}}
game_types = ['classic', 'rapid', 'blitz']

def get_time_control(minutes):
    if minutes >= time_control_types['classic']['start']:
        return 'classic'
    elif minutes >= time_control_types['rapid']['start']:
        return 'rapid'
    elif minutes >= time_control_types['blitz']['start']:
        return 'blitz'
    else:
        return 'bullet'

temp_df = df_games['increment_code'].str.split('+', n=1, expand=True)
temp_df[0] = temp_df[0].apply(lambda x: get_time_control(int(x)))
df_games = pd.concat([df_games, temp_df[0]], axis=1)
df_games.rename(columns={0: 'time_control_type'}, inplace=True)
ends = df_games['winner'].unique()
finish_by_control = {}
control_totals = {}
for control in game_types:
    finish_by_control[control] = {}
    control_totals[control] = 0
    for end in ends:
        finish_by_control[control][end] = df_games[(df_games['winner'] == end) & (df_games['time_control_type'] == control)].count()['winner']
        control_totals[control] = finish_by_control[control][end] + control_totals[control]
end_percents = {}
for control in game_types:
    end_percents[control] = {}
    for end in ends:
        end_percents[control][end] = round(finish_by_control[control][end] / control_totals[control] * 100, 2)
end_percents
bar_width = 0.25
bpos = np.arange(len(end_percents.keys()))
fig, ax = plt.subplots()
white = ax.bar(bpos - bar_width, [end_percents[control]['white'] for control in game_types], bar_width, label='White')
black = ax.bar(bpos, [end_percents[control]['black'] for control in game_types], bar_width, label='Black')
draws = ax.bar(bpos + bar_width, [end_percents[control]['draw'] for control in game_types], bar_width, label='Draw')
ax.set_ylabel('Win Percentage')
ax.set_title('Win Percentage by Control Type')
ax.set_xticks(bpos)
ax.set_xticklabels([x.capitalize() for x in game_types])
ax.legend()
fig.tight_layout()
plt.show()
code
18131067/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_games = pd.read_csv('../input/games.csv')
keep = ['id', 'rated', 'turns', 'victory_status', 'winner', 'increment_code', 'white_rating', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
df_games = df_games[keep]
df_minis = df_games[df_games['turns'] <= 30]
games = len(df_games.index)
minis = len(df_minis.index)
percent_minis = round(minis / games * 100, 2)
print('{mi}/{gmes} = {percent}% games are minis.'.format(mi=minis, gmes=games, percent=percent_minis))
code
2002649/cell_9
[ "text_plain_output_1.png" ]
from sklearn import svm
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/mushrooms.csv')
labelencoder = LabelEncoder()
for col in data.columns:
    data[col] = labelencoder.fit_transform(data[col])
data = np.array(data)
_length = 8000
train, test = (data[0:_length,], data[_length:,])
Xtrain, ytrain = (train[:, 1:], train[:, 0])
Xtest, ytest = (test[:, 1:], test[:, 0])
model = svm.SVC(kernel='linear', gamma=1)
model.fit(Xtrain, ytrain)
scr = model.score(Xtrain, ytrain)
predicted = model.predict(Xtest)
dif = predicted - ytest
from keras.models import *
from keras.layers import *
batch_size = 1
mlp_neurons = 5
neurons = 5
bi_neurons = 5
repeats = 5
nb_epochs = 5

def mlp_model(train, batch_size, nb_epoch, neurons):
    X, y = (train[:, 1:], train[:, 0])
    model = Sequential()
    # 'init' is the old Keras 1 argument name (kernel_initializer in Keras 2).
    model.add(Dense(neurons, input_dim=X.shape[1], init='normal', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(neurons, init='normal', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(neurons, init='normal', activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['acc'])
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=2, shuffle=False)
        model.reset_states()
    return model

mlp_RNN = mlp_model(train, batch_size, nb_epochs, mlp_neurons)
code
2002649/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/mushrooms.csv')
labelencoder = LabelEncoder()
for col in data.columns:
    data[col] = labelencoder.fit_transform(data[col])
print(data.shape)
data = np.array(data)
_length = 8000
train, test = (data[0:_length,], data[_length:,])
Xtrain, ytrain = (train[:, 1:], train[:, 0])
Xtest, ytest = (test[:, 1:], test[:, 0])
print(Xtrain.shape)
print(ytrain.shape)
print(Xtest.shape)
print(ytest.shape)
code
2002649/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.models import *
from keras.layers import *

batch_size = 1
mlp_neurons = 5
neurons = 5
bi_neurons = 5
repeats = 5
nb_epochs = 5
code
2002649/cell_11
[ "text_html_output_1.png" ]
from sklearn import svm
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/mushrooms.csv')
labelencoder = LabelEncoder()
for col in data.columns:
    data[col] = labelencoder.fit_transform(data[col])
data = np.array(data)
_length = 8000
train, test = (data[0:_length,], data[_length:,])
Xtrain, ytrain = (train[:, 1:], train[:, 0])
Xtest, ytest = (test[:, 1:], test[:, 0])
model = svm.SVC(kernel='linear', gamma=1)
model.fit(Xtrain, ytrain)
scr = model.score(Xtrain, ytrain)
predicted = model.predict(Xtest)
dif = predicted - ytest
from keras.models import *
from keras.layers import *
batch_size = 1
mlp_neurons = 5
neurons = 5
bi_neurons = 5
repeats = 5
nb_epochs = 5

def mlp_model(train, batch_size, nb_epoch, neurons):
    X, y = (train[:, 1:], train[:, 0])
    model = Sequential()
    model.add(Dense(neurons, input_dim=X.shape[1], init='normal', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(neurons, init='normal', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(neurons, init='normal', activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['acc'])
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=2, shuffle=False)
        model.reset_states()
    return model

def forecast_mlp(model, batch_size, row):
    X = row
    X = X.reshape(1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat

mlp_RNN = mlp_model(train, batch_size, nb_epochs, mlp_neurons)

def simulated_mlp(model, train, batch_size, nb_epochs, neurons):
    n1 = len(Xtest)
    n2 = repeats
    predictions1 = np.zeros((n1, n2), dtype=float)
    for r in range(repeats):
        predictions2 = list()
        for i in range(len(Xtest)):
            if i == 0:
                y = forecast_mlp(model, batch_size, Xtest[i, :])
                Xtest[i + 1, :-1] = Xtest[i, 1:]
                Xtest[i + 1, -1] = y
                predictions2.append(y)
            else:
                y = forecast_mlp(model, batch_size, Xtest[i - 1, :])
                Xtest[i, :-1] = Xtest[i - 1, 1:]
                Xtest[i, -1] = y
                predictions2.append(y)
        predictions1[:, r] = predictions2
    return np.mean(predictions1, axis=1)

print(ytest)
print('===================== mlp ==================================')
print(simulated_mlp(mlp_RNN, train, batch_size, nb_epochs, mlp_neurons))
code
2002649/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output

import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
2002649/cell_3
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/mushrooms.csv')
labelencoder = LabelEncoder()
for col in data.columns:
    data[col] = labelencoder.fit_transform(data[col])
data.head()
code
2002649/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import svm
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/mushrooms.csv')
labelencoder = LabelEncoder()
for col in data.columns:
    data[col] = labelencoder.fit_transform(data[col])
data = np.array(data)
_length = 8000
train, test = (data[0:_length,], data[_length:,])
Xtrain, ytrain = (train[:, 1:], train[:, 0])
Xtest, ytest = (test[:, 1:], test[:, 0])
model = svm.SVC(kernel='linear', gamma=1)
model.fit(Xtrain, ytrain)
scr = model.score(Xtrain, ytrain)
print(scr)
predicted = model.predict(Xtest)
print(predicted)
dif = predicted - ytest
print(dif)
code
16120909/cell_21
[ "text_plain_output_1.png" ]
# train_y is produced by an earlier notebook cell that is not part of this extract.
train_y.shape
code
16120909/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
health_data.isna().sum()
code
16120909/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data.info()
code
16120909/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

# train_x and train_y are produced by earlier notebook cells that are not part of this extract.
train_y.shape
model = LinearRegression()
model.fit(train_x, train_y)
model.coef_
code
16120909/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
code
16120909/cell_26
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
health_data.isna().sum()
categorical_cols = health_data.select_dtypes(exclude=np.number).columns
# train_x, train_y, test_x and test_y are produced by earlier notebook cells that are not part of this extract.
train_y.shape
model = LinearRegression()
model.fit(train_x, train_y)
model.coef_
model.intercept_
train_predict = model.predict(train_x)
test_predict = model.predict(test_x)
print('MSE - Train :', mean_squared_error(train_y, train_predict))
print('MSE - Test :', mean_squared_error(test_y, test_predict))
print('MAE - Train :', mean_absolute_error(train_y, train_predict))
print('MAE - Test :', mean_absolute_error(test_y, test_predict))
print('R2 - Train :', r2_score(train_y, train_predict))
print('R2 - Test :', r2_score(test_y, test_predict))
# The original expressions passed a tuple to np.abs, which does not compute MAPE;
# corrected here to mean absolute percentage error.
print('Mape - Train:', np.mean(np.abs((train_y - train_predict) / train_y)) * 100)
print('Mape - Test:', np.mean(np.abs((test_y - test_predict) / test_y)) * 100)
code
16120909/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
16120909/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
health_data.head()
code
16120909/cell_17
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
health_data.isna().sum()
categorical_cols = health_data.select_dtypes(exclude=np.number).columns
categorical_cols = categorical_cols.drop(['Indicator', 'Place'])
encoded_cols = pd.get_dummies(health_data[categorical_cols])
final_data = pd.concat([encoded_cols, health_data['Value']], axis=1)
final_data.info()
code
16120909/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

# train_x and train_y are produced by earlier notebook cells that are not part of this extract.
train_y.shape
model = LinearRegression()
model.fit(train_x, train_y)
model.coef_
model.intercept_
code
16120909/cell_14
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
health_data.isna().sum()
categorical_cols = health_data.select_dtypes(exclude=np.number).columns
categorical_cols = categorical_cols.drop(['Indicator', 'Place'])
categorical_cols
code
16120909/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

# train_x and train_y are produced by earlier notebook cells that are not part of this extract.
train_y.shape
model = LinearRegression()
model.fit(train_x, train_y)
code
16120909/cell_12
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

health_data = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_data = health_data.drop(columns=['BCHC Requested Methodology', 'Source', 'Methods', 'Notes'])
health_data.columns
health_data.isna().sum()
categorical_cols = health_data.select_dtypes(exclude=np.number).columns
categorical_cols
code
72076807/cell_4
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test = pd.read_csv('../input/30-days-of-ml/test.csv')
train = pd.read_csv('../input/30-days-of-ml/train.csv')
y = train.target
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
test.head()
code
72076807/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72076807/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder,OrdinalEncoder from xgboost import XGBRegressor import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('../input/30-days-of-ml/test.csv') train = pd.read_csv('../input/30-days-of-ml/train.csv') y = train.target X = train.drop(['target'], axis=1) X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) integer_features = list(X.columns[X.dtypes == 'int64']) continuous_features = list(X.columns[X.dtypes == 'float64']) categorical_features = list(X.columns[X.dtypes == 'object']) OneEncoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_train_cols = pd.DataFrame(OneEncoder.fit_transform(X_train[categorical_features])) OH_valid_cols = pd.DataFrame(OneEncoder.transform(X_valid[categorical_features])) OH_train_cols.index = X_train.index OH_valid_cols.index = X_valid.index OH_numtrain_cols = X_train.drop(categorical_features, axis=1) OH_numval_cols = X_valid.drop(categorical_features, axis=1) OH_X_train = pd.concat([OH_train_cols, OH_numtrain_cols], axis=1) OH_X_valid = pd.concat([OH_valid_cols, OH_numval_cols], axis=1) model = XGBRegressor(n_estimators=700, random_state=0, learning_rate=0.05) model.fit(OH_X_train, y_train) predict = model.predict(OH_X_valid) print(mean_squared_error(predict, y_valid, squared=False))
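# Notes: mean_squared_error(..., squared=False) returns the RMSE rather than the MSE.
# Also, the one-hot frames get integer column names from pd.DataFrame; if XGBoost
# rejects non-string feature names, casting them with
# OH_X_train.columns = OH_X_train.columns.astype(str) (and likewise for OH_X_valid)
# is a simple fix.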
code
33118397/cell_9
[ "text_plain_output_1.png" ]
import json import os # To walk through the data files provided import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) def getResults(directory, f): results = {} for _, _, filenames in os.walk(directory): for filename in filenames: results.update(f(directory + filename)) return results results = getResults(trainingDirectory, getGridSizeComparison) print(results)
code
33118397/cell_19
[ "text_plain_output_1.png" ]
from matplotlib import colors import json import matplotlib.pyplot as plt import os # To walk through the data files provided import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) def getResults(directory, f): results = {} for _, _, filenames in os.walk(directory): for filename in filenames: results.update(f(directory + filename)) return results results = getResults(trainingDirectory, getGridSizeComparison) count = 0 for _, value in results.items(): if value: count += 1 # Visualise the training cases for a task # Code inspiration from https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook def plotTaskTraining(task): """ Plots the training pairs of a specified task, using same color scheme as the ARC app """ cmap = colors.ListedColormap( ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25']) norm = colors.Normalize(vmin=0, vmax=9) # Plot all the training cases nTrainingCases = len(task["train"]) fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15,15)) for i in range(nTrainingCases): axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm) axs[i][0].axis('off') axs[i][0].set_title('Train Input') axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm) axs[i][1].axis('off') axs[i][1].set_title('Train Output') plt.tight_layout() plt.show() filename = testDirectory + '19bb5feb.json' task = readTaskFile(filename) def getGridSizeComparison2(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {'allCorrespond': True, 'outputsSame': True} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) if not (sameX and sameY): result['allCorrespond'] = False break outputX = None outputY = None for i in range(numTrain): trainCase = trainSection[i] trainCaseOutput = trainCase['output'] same = True if outputY == None: outputY = len(trainCaseOutput) elif not outputY == len(trainCaseOutput): same = False if outputX == None: outputX = len(trainCaseOutput[0]) elif not outputX == len(trainCaseOutput[0]): same = False if not same: result['outputsSame'] = False break return {ident: result} results = getResults(trainingDirectory, getGridSizeComparison2) countAllCorrespondFalse = 0 for _, value in results.items(): if not value['allCorrespond']: countAllCorrespondFalse += 1 countAllCorrespondFalseOutputsSameTrue = 0 
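# Among the tasks whose input and output grid sizes differ, count how many still use
# a single fixed grid size for all of their outputs.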
for _, value in results.items():
    if not value['allCorrespond'] and value['outputsSame']:
        countAllCorrespondFalseOutputsSameTrue += 1
print('Of the ' + str(countAllCorrespondFalse) + ' tasks where the input:output ' + 'grid sizes were not the same,\n' + str(countAllCorrespondFalseOutputsSameTrue) + ' had identical grid sizes ' + 'for all their outputs, or ' + str(round(countAllCorrespondFalseOutputsSameTrue / countAllCorrespondFalse * 100)) + '%.')
code
33118397/cell_7
[ "text_plain_output_1.png" ]
import json import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename)
code
33118397/cell_18
[ "text_plain_output_1.png" ]
from matplotlib import colors import json import matplotlib.pyplot as plt import os # To walk through the data files provided import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) def getResults(directory, f): results = {} for _, _, filenames in os.walk(directory): for filename in filenames: results.update(f(directory + filename)) return results results = getResults(trainingDirectory, getGridSizeComparison) count = 0 for _, value in results.items(): if value: count += 1 # Visualise the training cases for a task # Code inspiration from https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook def plotTaskTraining(task): """ Plots the training pairs of a specified task, using same color scheme as the ARC app """ cmap = colors.ListedColormap( ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25']) norm = colors.Normalize(vmin=0, vmax=9) # Plot all the training cases nTrainingCases = len(task["train"]) fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15,15)) for i in range(nTrainingCases): axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm) axs[i][0].axis('off') axs[i][0].set_title('Train Input') axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm) axs[i][1].axis('off') axs[i][1].set_title('Train Output') plt.tight_layout() plt.show() filename = testDirectory + '19bb5feb.json' task = readTaskFile(filename) def getGridSizeComparison2(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {'allCorrespond': True, 'outputsSame': True} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) if not (sameX and sameY): result['allCorrespond'] = False break outputX = None outputY = None for i in range(numTrain): trainCase = trainSection[i] trainCaseOutput = trainCase['output'] same = True if outputY == None: outputY = len(trainCaseOutput) elif not outputY == len(trainCaseOutput): same = False if outputX == None: outputX = len(trainCaseOutput[0]) elif not outputX == len(trainCaseOutput[0]): same = False if not same: result['outputsSame'] = False break return {ident: result} results = getResults(trainingDirectory, getGridSizeComparison2) print(results)
code
33118397/cell_15
[ "text_plain_output_1.png" ]
from matplotlib import colors import json import matplotlib.pyplot as plt import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) # Visualise the training cases for a task # Code inspiration from https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook def plotTaskTraining(task): """ Plots the training pairs of a specified task, using same color scheme as the ARC app """ cmap = colors.ListedColormap( ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25']) norm = colors.Normalize(vmin=0, vmax=9) # Plot all the training cases nTrainingCases = len(task["train"]) fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15,15)) for i in range(nTrainingCases): axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm) axs[i][0].axis('off') axs[i][0].set_title('Train Input') axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm) axs[i][1].axis('off') axs[i][1].set_title('Train Output') plt.tight_layout() plt.show() filename = testDirectory + '19bb5feb.json' task = readTaskFile(filename) def getGridSizeComparison2(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {'allCorrespond': True, 'outputsSame': True} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) if not (sameX and sameY): result['allCorrespond'] = False break outputX = None outputY = None for i in range(numTrain): trainCase = trainSection[i] trainCaseOutput = trainCase['output'] same = True if outputY == None: outputY = len(trainCaseOutput) elif not outputY == len(trainCaseOutput): same = False if outputX == None: outputX = len(trainCaseOutput[0]) elif not outputX == len(trainCaseOutput[0]): same = False if not same: result['outputsSame'] = False break return {ident: result} filename = testDirectory + '19bb5feb.json' print(getGridSizeComparison2(filename))
code
33118397/cell_16
[ "text_plain_output_1.png" ]
from matplotlib import colors import json import matplotlib.pyplot as plt import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) # Visualise the training cases for a task # Code inspiration from https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook def plotTaskTraining(task): """ Plots the training pairs of a specified task, using same color scheme as the ARC app """ cmap = colors.ListedColormap( ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25']) norm = colors.Normalize(vmin=0, vmax=9) # Plot all the training cases nTrainingCases = len(task["train"]) fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15,15)) for i in range(nTrainingCases): axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm) axs[i][0].axis('off') axs[i][0].set_title('Train Input') axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm) axs[i][1].axis('off') axs[i][1].set_title('Train Output') plt.tight_layout() plt.show() filename = testDirectory + '19bb5feb.json' task = readTaskFile(filename) def getGridSizeComparison2(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {'allCorrespond': True, 'outputsSame': True} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) if not (sameX and sameY): result['allCorrespond'] = False break outputX = None outputY = None for i in range(numTrain): trainCase = trainSection[i] trainCaseOutput = trainCase['output'] same = True if outputY == None: outputY = len(trainCaseOutput) elif not outputY == len(trainCaseOutput): same = False if outputX == None: outputX = len(trainCaseOutput[0]) elif not outputX == len(trainCaseOutput[0]): same = False if not same: result['outputsSame'] = False break return {ident: result} filename = testDirectory + '19bb5feb.json' filename = trainingDirectory + '0b148d64.json' print(getGridSizeComparison2(filename))
code
33118397/cell_17
[ "image_output_1.png" ]
from matplotlib import colors import json import matplotlib.pyplot as plt import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) # Visualise the training cases for a task # Code inspiration from https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook def plotTaskTraining(task): """ Plots the training pairs of a specified task, using same color scheme as the ARC app """ cmap = colors.ListedColormap( ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25']) norm = colors.Normalize(vmin=0, vmax=9) # Plot all the training cases nTrainingCases = len(task["train"]) fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15,15)) for i in range(nTrainingCases): axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm) axs[i][0].axis('off') axs[i][0].set_title('Train Input') axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm) axs[i][1].axis('off') axs[i][1].set_title('Train Output') plt.tight_layout() plt.show() filename = testDirectory + '19bb5feb.json' task = readTaskFile(filename) def getGridSizeComparison2(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {'allCorrespond': True, 'outputsSame': True} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) if not (sameX and sameY): result['allCorrespond'] = False break outputX = None outputY = None for i in range(numTrain): trainCase = trainSection[i] trainCaseOutput = trainCase['output'] same = True if outputY == None: outputY = len(trainCaseOutput) elif not outputY == len(trainCaseOutput): same = False if outputX == None: outputX = len(trainCaseOutput[0]) elif not outputX == len(trainCaseOutput[0]): same = False if not same: result['outputsSame'] = False break return {ident: result} filename = testDirectory + '19bb5feb.json' filename = trainingDirectory + '0b148d64.json' filename = trainingDirectory + '0b148d64.json' task = readTaskFile(filename) plotTaskTraining(task)
code
33118397/cell_10
[ "text_plain_output_1.png" ]
import json import os # To walk through the data files provided import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) def getResults(directory, f): results = {} for _, _, filenames in os.walk(directory): for filename in filenames: results.update(f(directory + filename)) return results results = getResults(trainingDirectory, getGridSizeComparison) count = 0 for _, value in results.items(): if value: count += 1 print('Proportion of training examples with the same grid size: ' + str(round(count / len(results), 2)))
code
33118397/cell_12
[ "text_plain_output_1.png" ]
from matplotlib import colors import json import matplotlib.pyplot as plt import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename) def getGridSizeComparison(filename): data = readTaskFile(filename) trainSection = data['train'] ident = data['id'] numTrain = len(trainSection) result = {} for i in range(numTrain): trainCase = trainSection[i] trainCaseInput = trainCase['input'] trainCaseOutput = trainCase['output'] sameY = len(trainCaseInput) == len(trainCaseOutput) sameX = len(trainCaseInput[0]) == len(trainCaseOutput[0]) result[ident + '_train_' + str(i)] = sameX and sameY return result filename = testDirectory + '19bb5feb.json' getGridSizeComparison(filename) # Visualise the training cases for a task # Code inspiration from https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook def plotTaskTraining(task): """ Plots the training pairs of a specified task, using same color scheme as the ARC app """ cmap = colors.ListedColormap( ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00', '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25']) norm = colors.Normalize(vmin=0, vmax=9) # Plot all the training cases nTrainingCases = len(task["train"]) fig, axs = plt.subplots(nTrainingCases, 2, figsize=(15,15)) for i in range(nTrainingCases): axs[i][0].imshow(task['train'][i]['input'], cmap=cmap, norm=norm) axs[i][0].axis('off') axs[i][0].set_title('Train Input') axs[i][1].imshow(task['train'][i]['output'], cmap=cmap, norm=norm) axs[i][1].axis('off') axs[i][1].set_title('Train Output') plt.tight_layout() plt.show() filename = testDirectory + '19bb5feb.json' task = readTaskFile(filename) plotTaskTraining(task)
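# Caveat: plotTaskTraining assumes at least two training pairs; with a single pair,
# plt.subplots returns a 1-D axes array and axs[i][0] would raise an IndexError.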
code
33118397/cell_5
[ "image_output_1.png" ]
import json import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' trainingDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' evaluationDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/training/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirectory + '19bb5feb.json' readTaskFile(filename)
code
34127012/cell_13
[ "text_html_output_1.png" ]
import pandas as pd data_df = pd.read_csv('https://d17h27t6h515a5.cloudfront.net/topher/2017/October/59dd2e9a_noshowappointments-kagglev2-may-2016/noshowappointments-kagglev2-may-2016.csv') data_df.isnull().sum() data_df.dtypes
code
34127012/cell_6
[ "text_plain_output_1.png" ]
import os import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
34127012/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('https://d17h27t6h515a5.cloudfront.net/topher/2017/October/59dd2e9a_noshowappointments-kagglev2-may-2016/noshowappointments-kagglev2-may-2016.csv') data_df.isnull().sum() data_df.dtypes data_df.duplicated().sum()
code
34127012/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('https://d17h27t6h515a5.cloudfront.net/topher/2017/October/59dd2e9a_noshowappointments-kagglev2-may-2016/noshowappointments-kagglev2-may-2016.csv') data_df.head(2)
code
34127012/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('https://d17h27t6h515a5.cloudfront.net/topher/2017/October/59dd2e9a_noshowappointments-kagglev2-may-2016/noshowappointments-kagglev2-may-2016.csv') data_df.isnull().sum() data_df.dtypes data_df['Neighbourhood'].unique()
code
34127012/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd data_df = pd.read_csv('https://d17h27t6h515a5.cloudfront.net/topher/2017/October/59dd2e9a_noshowappointments-kagglev2-may-2016/noshowappointments-kagglev2-may-2016.csv') data_df.isnull().sum()
code
105176386/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
# Goals per player per match (own goals 'OG' excluded), reconstructed from the
# cantidad_goles logic used elsewhere in this notebook.
players['goles'] = players['Event'].fillna('').map(lambda e: e.count('G') - e.count('OG'))
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
# Years of World Cup experience: tournament year minus the player's first World Cup
# (reconstructed: the 'antiguedad' column is needed by the plot below).
goles['antiguedad'] = goles['Year'] - goles['Player Name'].map(inicio)
plt.figure(dpi=125, figsize=(4.8, 4.8))
sns.countplot(data=goles[goles['antiguedad'] < 20], x='antiguedad', palette=['#FFF5E0', '#EAD2A4', '#BF9445', '#733E1F', '#592D1D'])
plt.title('Distribution of years of World Cup experience\n for the players')
plt.xlabel('Years of World Cup experience')
plt.ylabel('Number of players')
code
105176386/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
# Goals per player per match (own goals 'OG' excluded), reconstructed from the
# cantidad_goles logic used elsewhere in this notebook.
players['goles'] = players['Event'].fillna('').map(lambda e: e.count('G') - e.count('OG'))
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
code
105176386/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv') matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv') cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv') matches = matches.dropna() matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'})
code
105176386/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv') matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv') cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv') players
code
105176386/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')

def cantidad_goles(texto):
    # number of goals in an Event string; own goals ('OG') are excluded
    return texto.count('G') - texto.count('OG')
# Build the 'goles' column that the selection below relies on
# (reconstructed: the assignment was missing from this cell).
players['goles'] = players['Event'].fillna('').map(cantidad_goles)
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']

def min_goles(texto):
    # minutes at which goals occur in an Event string, e.g. "G43' G70'" -> [43, 70]
    eventos = texto.split("' ")
    goles = [e.replace("'", '') for e in eventos if e and e[0] == 'G']
    return [int(g[1:]) for g in goles]
sns.kdeplot(players['Event'].fillna('').map(min_goles).sum(), fill=True, color='#BDE038')
plt.xlim((0, 120))
plt.yticks([])
plt.ylabel('Probability')
plt.xlabel('Minute')
plt.title('Distribution of the minutes in which goals are scored')
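# Note: .map(min_goles) yields a Series of lists, and .sum() reduces them with '+',
# concatenating everything into one flat list of goal minutes for the KDE plot.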
code
105176386/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
# Goals per player per match (own goals 'OG' excluded), reconstructed from the
# cantidad_goles logic used elsewhere in this notebook.
players['goles'] = players['Event'].fillna('').map(lambda e: e.count('G') - e.count('OG'))
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'})
matches.groupby(['Away Team Name', 'Year']).agg({'Away Team Goals': 'mean'})
home_goals = matches[['Home Team Name', 'Year', 'Home Team Goals']].rename(columns={'Home Team Name': 'Team Name', 'Home Team Goals': 'Goals'})
# Rename 'Away Team Goals' here (not 'Home Team Goals', which is absent from this
# frame) so the away rows actually contribute a 'Goals' column to the concat below.
away_goals = matches[['Away Team Name', 'Year', 'Away Team Goals']].rename(columns={'Away Team Name': 'Team Name', 'Away Team Goals': 'Goals'})
goals = pd.concat([home_goals, away_goals], ignore_index=True)
goals.groupby(['Team Name', 'Year']).agg({'Goals': 'mean'})
code
105176386/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv')
matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv')
cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv')
matches = matches.dropna()
partidos_por_anio = matches[['Year', 'MatchID']].dropna().astype('Int64')
# Goals per player per match (own goals 'OG' excluded), reconstructed from the
# cantidad_goles logic used elsewhere in this notebook.
players['goles'] = players['Event'].fillna('').map(lambda e: e.count('G') - e.count('OG'))
jugadores_goles = players[['MatchID', 'Player Name', 'goles']]
goles = pd.merge(jugadores_goles, partidos_por_anio, on='MatchID', how='inner')
goles
inicio = goles.groupby('Player Name').agg({'Year': 'min'})
inicio = inicio.to_dict()['Year']
# Years of World Cup experience: tournament year minus the player's first World Cup
# (reconstructed: the 'antiguedad' column is needed by the histogram below).
goles['antiguedad'] = goles['Year'] - goles['Player Name'].map(inicio)
plt.hist(goles[goles['antiguedad'] < 20]['antiguedad'])
code
105176386/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) players = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupPlayers.csv') matches = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCupMatches.csv') cups = pd.read_csv('/kaggle/input/fifa-world-cup/WorldCups.csv') matches = matches.dropna() matches.groupby(['Home Team Name', 'Year']).agg({'Home Team Goals': 'mean'}) matches.groupby(['Away Team Name', 'Year']).agg({'Away Team Goals': 'mean'})
code