Dataset schema (one record per notebook cell):
path: string (length 13 to 17)
screenshot_names: sequence of strings (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value: "code")
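A minimal sketch of how a dump with this schema could be loaded and inspected using the Hugging Face datasets library; the dataset path 'user/kaggle-notebook-cells' is a hypothetical placeholder, not this dataset's actual name.

# Minimal sketch: load a dataset with the schema above and print one record.
# 'user/kaggle-notebook-cells' is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset('user/kaggle-notebook-cells', split='train')
row = ds[0]
print(row['path'])               # e.g. '128047328/cell_71'
print(row['screenshot_names'])   # list of output image names for the cell
print(row['cell_type'])          # always 'code' (the single class)
print(row['code'][:200])         # first 200 characters of the cell source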
128047328/cell_71
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_8.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw  # required so that Chem.Draw.MolsToGridImage below resolves
import deepchem as dc
import numpy as np
import pandas as pd

df_train = pd.read_csv('/kaggle/input/aqueous-solubility-predictioin/train.csv')
df_test = pd.read_csv('/kaggle/input/aqueous-solubility-predictioin/test.csv')
df_train.shape

# Draw the first ten molecules as a grid image
smiles_list = df_train['SMILES'][:10]
mol_list = []
for smile in smiles_list:
    mol = Chem.MolFromSmiles(smile)
    mol_list.append(mol)
img = Chem.Draw.MolsToGridImage(mol_list, molsPerRow=5)
img

df_train.columns
X = df_train.drop(columns=['ID', 'Name', 'InChI', 'InChIKey', 'SMILES', 'SD', 'Ocurrences', 'Group', 'comp_id'])

# Define a function to featurize a SMILES string as a 1024-bit Morgan fingerprint (radius 3)
def featurize_smiles(smiles):
    mol = Chem.MolFromSmiles(smiles)
    fp = AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=1024)
    features = np.array(list(fp.ToBitString())).astype(float)
    return features

smiles = ['CCC']
featurizer = dc.feat.Mol2VecFingerprint()
features = featurizer.featurize(smiles)
features = featurizer.featurize(df_train['SMILES'])
features.shape

smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O']
featurizer = dc.feat.RDKitDescriptors()
features = featurizer.featurize(smiles)
features.shape
features = featurizer.featurize(df_train['SMILES'])

smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O']
featurizer = dc.feat.MordredDescriptors()
features = featurizer.featurize(smiles)
features
code
128047328/cell_5
[ "text_html_output_2.png", "text_plain_output_1.png" ]
import deepchem as dc
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from rdkit import Chem
from rdkit.Chem import AllChem
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error
from pycaret.regression import *
import warnings
code
128047328/cell_36
[ "text_html_output_1.png" ]
best_top3_models = compare_models(n_select=3)
code
73093165/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
test.describe().transpose()
code
73093165/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
train.describe().transpose()
code
73093165/cell_23
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
test.shape
train.pop('id')
test_ids = test.pop('id')
validation_split = 0.3
train_features, validation_features = train_test_split(train, test_size=validation_split)
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))
train_features.head()
code
73093165/cell_33
[ "text_plain_output_1.png" ]
ideal_model = model_4.fit(train_features, train_targets, early_stopping_rounds=3,
                          eval_set=[(validation_features, validation_targets)], verbose=False)
loss_pred = ideal_model.predict(test)

import seaborn as sns
# `label` expects a string, not a Series of ids; plot the predictions against the test ids instead
sns.lineplot(x=test_ids, y=loss_pred)
code
73093165/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
test.shape
train.pop('id')
test_ids = test.pop('id')
test.head()
code
73093165/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
test.shape
train.pop('id')
test_ids = test.pop('id')
train.head()
code
73093165/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.head()
code
73093165/cell_28
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
test.shape
train.pop('id')
test_ids = test.pop('id')
validation_split = 0.3
train_features, validation_features = train_test_split(train, test_size=validation_split)
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))

from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_features, train_targets)

from sklearn.metrics import mean_absolute_error
predictions = my_model.predict(validation_features)
print('Mean Absolute Error: ' + str(mean_absolute_error(predictions, validation_targets)))
code
73093165/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
code
73093165/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
test.shape
test.head(5)
code
73093165/cell_31
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor import pandas as pd train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv') train.shape test.shape train.pop('id') test_ids = test.pop('id') validation_split = 0.3 train_features, validation_features = train_test_split(train, test_size=validation_split) train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss')) model_1 = XGBRegressor(n_estimators=100, learning_rate=0.05) model_2 = XGBRegressor(n_estimators=200, learning_rate=0.1) model_3 = XGBRegressor(n_estimators=300, learning_rate=0.5) model_4 = XGBRegressor(n_estimators=300, learning_rate=1, random_state=0) models = [model_1, model_2, model_3, model_4] def score_model(model): model.fit(train_features, train_targets, early_stopping_rounds=3, eval_set=[(validation_features, validation_targets)], verbose=False) preds = model.predict(validation_features) return mean_absolute_error(validation_targets, preds) for i in range(0, len(models)): mae = score_model(models[i]) print('Model %d MAE: %d' % (i + 1, mae))
code
73093165/cell_24
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
test.shape
train.pop('id')
test_ids = test.pop('id')
validation_split = 0.3
train_features, validation_features = train_test_split(train, test_size=validation_split)
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))
validation_features.head()
code
73093165/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
test.shape
code
73093165/cell_27
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pandas as pd

train = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2021/test.csv')
train.shape
test.shape
train.pop('id')
test_ids = test.pop('id')
validation_split = 0.3
train_features, validation_features = train_test_split(train, test_size=validation_split)
train_targets, validation_targets = (train_features.pop('loss'), validation_features.pop('loss'))

from xgboost import XGBRegressor
my_model = XGBRegressor()
my_model.fit(train_features, train_targets)
code
1008455/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_hdf('../input/train.h5')
data.loc[(data.id == 288) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']]
data.loc[(data.id == 1201) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']]
code
1008455/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1008455/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_hdf('../input/train.h5')
data.loc[(data.id == 288) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']]
code
1008455/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_hdf('../input/train.h5')
code
1008455/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_hdf('../input/train.h5')
data.technical_16.describe()
code
1007442/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris.plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm')
code
1007442/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris['Species'].value_counts()
code
1007442/cell_1
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris.head()
code
1007442/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style='white', color_codes=True)
iris = pd.read_csv('../input/Iris.csv')
iris.plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm')
code
106210684/cell_13
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
c.ndim
code
106210684/cell_20
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
c.ndim
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
c = np.array([[1, 2, 3, 9], [5, 6, 7, 8]])
c
np.sort(c, axis=0)
np.sort(c, axis=1)
code
106210684/cell_11
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
code
106210684/cell_19
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
c.ndim
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
c = np.array([[1, 2, 3, 9], [5, 6, 7, 8]])
c
np.sort(c, axis=0)
code
106210684/cell_7
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
code
106210684/cell_18
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
c.ndim
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
c = np.array([[1, 2, 3, 9], [5, 6, 7, 8]])
c
code
106210684/cell_8
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
code
106210684/cell_15
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
a = np.random.randint(1, 20, 10)
a
code
106210684/cell_16
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
code
106210684/cell_3
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
code
106210684/cell_17
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
a[::-1]
code
106210684/cell_24
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
c.ndim
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
c = np.array([[1, 2, 3, 9], [5, 6, 7, 8]])
c
np.sort(c, axis=0)
np.sort(c, axis=1)
b = np.random.randint(1, 20, 10)
b
c = np.argsort(b)
b[c[0]]
code
106210684/cell_22
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
c.ndim
a = np.random.randint(1, 20, 10)
a
a = np.sort(a)
a
c = np.array([[1, 2, 3, 9], [5, 6, 7, 8]])
c
np.sort(c, axis=0)
np.sort(c, axis=1)
b = np.random.randint(1, 20, 10)
b
code
106210684/cell_10
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
code
106210684/cell_12
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
a = np.array([[1, 2, 3], [5, 6, 7]])
a
a.T
np.reshape(a, (3, 2))
b = np.arange(18)
b
c = np.reshape(b, (2, 3, 3))
c
code
106210684/cell_5
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
code
34136442/cell_4
[ "text_html_output_1.png", "image_output_1.png" ]
from nltk import download
from nltk.corpus import stopwords
import gc
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import matplotlib.pyplot as plt
import json
import requests
import io
import gc
import re
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from tqdm.notebook import tqdm

pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_rows', 1000)
plt.rcParams['figure.figsize'] = [12, 8]

from nltk import download
download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk import word_tokenize
download('punkt')
plt.rcParams['figure.figsize'] = [12, 8]

MAX_LEN = 3000
research = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
research['title_abstract'] = [str(research.loc[i, 'title']) + ' ' + str(research.loc[i, 'abstract']) for i in research.index]
research['source'] = 'research'
research

news = pd.read_csv('/kaggle/input/covid19-public-media-dataset/covid19_articles.csv')
del news['Unnamed: 0']
news['source'] = 'news'
news['title_abstract'] = [news.loc[i, 'title'] + '. ' + news.loc[i, 'content'][:MAX_LEN - len(news.loc[i, 'title'])] for i in news.index]
news

data = pd.concat([research[['title_abstract', 'source', 'url']], news[['title_abstract', 'source', 'url']]]).rename(columns={'title_abstract': 'title'}).drop_duplicates().reset_index(drop=True)
print('News:', news.shape)
print('Research:', research.shape)
print('Combined data:', data.shape)
del research
del news
gc.collect()
data
code
34136442/cell_2
[ "text_html_output_1.png", "image_output_1.png" ]
from nltk import download
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import matplotlib.pyplot as plt
import json
import requests
import io
import gc
import re
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from tqdm.notebook import tqdm

pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_rows', 1000)
plt.rcParams['figure.figsize'] = [12, 8]

from nltk import download
download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk import word_tokenize
download('punkt')
plt.rcParams['figure.figsize'] = [12, 8]
code
34136442/cell_16
[ "text_html_output_1.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import KeyedVectors
from gensim.utils import simple_preprocess
from nltk import download
from nltk import word_tokenize
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import stopwords
from scipy.spatial import distance
from tqdm.notebook import tqdm
from wordcloud import WordCloud
import gc
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import matplotlib.pyplot as plt
import json
import requests
import io
import gc
import re
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from tqdm.notebook import tqdm

pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_rows', 1000)
plt.rcParams['figure.figsize'] = [12, 8]

from nltk import download
download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk import word_tokenize
download('punkt')
plt.rcParams['figure.figsize'] = [12, 8]

MAX_LEN = 3000
research = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
research['title_abstract'] = [str(research.loc[i, 'title']) + ' ' + str(research.loc[i, 'abstract']) for i in research.index]
research['source'] = 'research'
research

news = pd.read_csv('/kaggle/input/covid19-public-media-dataset/covid19_articles.csv')
del news['Unnamed: 0']
news['source'] = 'news'
news['title_abstract'] = [news.loc[i, 'title'] + '. ' + news.loc[i, 'content'][:MAX_LEN - len(news.loc[i, 'title'])] for i in news.index]
news

data = pd.concat([research[['title_abstract', 'source', 'url']], news[['title_abstract', 'source', 'url']]]).rename(columns={'title_abstract': 'title'}).drop_duplicates().reset_index(drop=True)
del research
del news
gc.collect()
data

import gensim
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
from gensim.models.keyedvectors import KeyedVectors

filepath = '../input/gnewsvector/GoogleNews-vectors-negative300.bin'
from gensim.models import KeyedVectors
wv_from_bin = KeyedVectors.load_word2vec_format(filepath, binary=True)

embeddings_index = {}
for word, vector in zip(wv_from_bin.vocab, wv_from_bin.vectors):
    coefs = np.asarray(vector, dtype='float32')
    embeddings_index[word] = coefs

def preprocess(doc):
    doc = doc.lower()
    doc = word_tokenize(doc)
    doc = [w for w in doc if not w in stop_words]
    doc = [w for w in doc if w.isalpha()]
    return doc

def avg_feature_vector(sentence, model, num_features):
    words = simple_preprocess(sentence)
    feature_vec = np.zeros((num_features,), dtype='float32')
    n_words = 0
    for word in words:
        if word in embeddings_index.keys():
            n_words += 1
            feature_vec = np.add(feature_vec, model[word])
    if n_words > 0:
        feature_vec = np.divide(feature_vec, n_words)
    return feature_vec

from scipy.spatial import distance

def calc_dist_cosine(s1, target, max_dist=0.5):
    ret = []
    for t in tqdm(target):
        tv = avg_feature_vector(t, model=embeddings_index, num_features=300)
        qv = avg_feature_vector(s1, model=embeddings_index, num_features=300)  # fix: use the s1 parameter, not the global q
        dist = distance.cosine(tv, qv)
        if dist <= max_dist:
            ret.append([dist, t])
    df = pd.DataFrame(ret, columns=['dist', 'title']).reset_index(drop=True)
    return pd.merge(df, data, on='title', how='left').sort_values(by='dist', ascending=True).reset_index(drop=True)

def calc_dist_wm(s1, target, max_dist=5.0):
    """ Word mover distance.
    Slower than cosine similarity.
    https://markroxor.github.io/gensim/static/notebooks/WMD_tutorial.html """
    ret = []
    for t in tqdm(target):
        dist = wv_from_bin.wmdistance(preprocess(s1), preprocess(t))
        if dist <= max_dist:
            ret.append([dist, t])
    df = pd.DataFrame(ret, columns=['dist', 'title']).reset_index(drop=True)
    return pd.merge(df, data, on='title', how='left').sort_values(by='dist', ascending=True).reset_index(drop=True)

def calc_dist(s1, target):
    """ Dist interface """
    return calc_dist_cosine(s1, target)

def make_clickable(link):
    return f'<a target="_blank" href="{link}">{link}</a>'

def search(q, out_prefix='result'):
    res = calc_dist(q, data.title)
    res.to_csv(f'result_{out_prefix}.csv', index=False)
    topn = 20
    wc = WordCloud(background_color='white', stopwords=stop_words).generate(' '.join(res.title.values.tolist()[:topn]).lower())
    plt.axis('off')
    return res

q = 'decontamination'
res = search(q, 'decontamination')
res.head(20)[['title', 'url']].style.format({'url': make_clickable})
q = 'persistence of the virus'
res = search(q, 'persistence')
res.head(20)[['title', 'url']].style.format({'url': make_clickable})
code
34136442/cell_14
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import KeyedVectors
from gensim.utils import simple_preprocess
from nltk import download
from nltk import word_tokenize
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import stopwords
from scipy.spatial import distance
from tqdm.notebook import tqdm
from wordcloud import WordCloud
import gc
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import matplotlib.pyplot as plt
import json
import requests
import io
import gc
import re
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from tqdm.notebook import tqdm

pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_rows', 1000)
plt.rcParams['figure.figsize'] = [12, 8]

from nltk import download
download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk import word_tokenize
download('punkt')
plt.rcParams['figure.figsize'] = [12, 8]

MAX_LEN = 3000
research = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
research['title_abstract'] = [str(research.loc[i, 'title']) + ' ' + str(research.loc[i, 'abstract']) for i in research.index]
research['source'] = 'research'
research

news = pd.read_csv('/kaggle/input/covid19-public-media-dataset/covid19_articles.csv')
del news['Unnamed: 0']
news['source'] = 'news'
news['title_abstract'] = [news.loc[i, 'title'] + '. ' + news.loc[i, 'content'][:MAX_LEN - len(news.loc[i, 'title'])] for i in news.index]
news

data = pd.concat([research[['title_abstract', 'source', 'url']], news[['title_abstract', 'source', 'url']]]).rename(columns={'title_abstract': 'title'}).drop_duplicates().reset_index(drop=True)
del research
del news
gc.collect()
data

import gensim
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
from gensim.models.keyedvectors import KeyedVectors

filepath = '../input/gnewsvector/GoogleNews-vectors-negative300.bin'
from gensim.models import KeyedVectors
wv_from_bin = KeyedVectors.load_word2vec_format(filepath, binary=True)

embeddings_index = {}
for word, vector in zip(wv_from_bin.vocab, wv_from_bin.vectors):
    coefs = np.asarray(vector, dtype='float32')
    embeddings_index[word] = coefs

def preprocess(doc):
    doc = doc.lower()
    doc = word_tokenize(doc)
    doc = [w for w in doc if not w in stop_words]
    doc = [w for w in doc if w.isalpha()]
    return doc

def avg_feature_vector(sentence, model, num_features):
    words = simple_preprocess(sentence)
    feature_vec = np.zeros((num_features,), dtype='float32')
    n_words = 0
    for word in words:
        if word in embeddings_index.keys():
            n_words += 1
            feature_vec = np.add(feature_vec, model[word])
    if n_words > 0:
        feature_vec = np.divide(feature_vec, n_words)
    return feature_vec

from scipy.spatial import distance

def calc_dist_cosine(s1, target, max_dist=0.5):
    ret = []
    for t in tqdm(target):
        tv = avg_feature_vector(t, model=embeddings_index, num_features=300)
        qv = avg_feature_vector(s1, model=embeddings_index, num_features=300)  # fix: use the s1 parameter, not the global q
        dist = distance.cosine(tv, qv)
        if dist <= max_dist:
            ret.append([dist, t])
    df = pd.DataFrame(ret, columns=['dist', 'title']).reset_index(drop=True)
    return pd.merge(df, data, on='title', how='left').sort_values(by='dist', ascending=True).reset_index(drop=True)

def calc_dist_wm(s1, target, max_dist=5.0):
    """ Word mover distance.
    Slower than cosine similarity.
    https://markroxor.github.io/gensim/static/notebooks/WMD_tutorial.html """
    ret = []
    for t in tqdm(target):
        dist = wv_from_bin.wmdistance(preprocess(s1), preprocess(t))
        if dist <= max_dist:
            ret.append([dist, t])
    df = pd.DataFrame(ret, columns=['dist', 'title']).reset_index(drop=True)
    return pd.merge(df, data, on='title', how='left').sort_values(by='dist', ascending=True).reset_index(drop=True)

def calc_dist(s1, target):
    """ Dist interface """
    return calc_dist_cosine(s1, target)

def make_clickable(link):
    return f'<a target="_blank" href="{link}">{link}</a>'

def search(q, out_prefix='result'):
    res = calc_dist(q, data.title)
    res.to_csv(f'result_{out_prefix}.csv', index=False)
    topn = 20
    wc = WordCloud(background_color='white', stopwords=stop_words).generate(' '.join(res.title.values.tolist()[:topn]).lower())
    plt.axis('off')
    return res

q = 'decontamination'
res = search(q, 'decontamination')
res.head(20)[['title', 'url']].style.format({'url': make_clickable})
code
2022814/cell_9
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
sns.countplot(x='Survived', data=df_train)
code
2022814/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_test.head()
code
2022814/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.info()
code
2022814/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.head()
code
2022814/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import tree
from sklearn.metrics import accuracy_score

sns.set()
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2022814/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.describe()
code
122263284/cell_4
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.keras import backend as K  # needed by sample(); imported as K in this notebook's first cell
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint8)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)

disable_eager_execution()
original_dim = train.shape[1]
input_shape = (original_dim,)
intermediate_dim = int(original_dim / 2)
latent_dim = int(original_dim / 3)

# Reparameterization trick: draw z from N(z_mean, exp(z_log_var))
def sample(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

inputs = tf.keras.layers.Input(shape=input_shape, name='encoder_input')
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(inputs)
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(x)
z_mean = tf.keras.layers.Dense(int(original_dim / 4), name='z_mean')(x)
z_log_var = tf.keras.layers.Dense(int(original_dim / 4), name='z_log_var')(x)
z = tf.keras.layers.Lambda(sample, output_shape=(input_shape,), name='z')([z_mean, z_log_var])
encoder = tf.keras.Model(inputs, z, name='encoder')
encoder.summary()
code
122263284/cell_6
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.keras import backend as K  # needed by sample(); imported as K in this notebook's first cell
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint8)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)

disable_eager_execution()
original_dim = train.shape[1]
input_shape = (original_dim,)
intermediate_dim = int(original_dim / 2)
latent_dim = int(original_dim / 3)

# Reparameterization trick: draw z from N(z_mean, exp(z_log_var))
def sample(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

inputs = tf.keras.layers.Input(shape=input_shape, name='encoder_input')
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(inputs)
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(x)
z_mean = tf.keras.layers.Dense(int(original_dim / 4), name='z_mean')(x)
z_log_var = tf.keras.layers.Dense(int(original_dim / 4), name='z_log_var')(x)
z = tf.keras.layers.Lambda(sample, output_shape=(input_shape,), name='z')([z_mean, z_log_var])
encoder = tf.keras.Model(inputs, z, name='encoder')
encoder.summary()

latent_inputs = tf.keras.layers.Input(shape=(int(original_dim / 4),), name='z_sampling')
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(latent_inputs)
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(x)
outputs = tf.keras.layers.Dense(original_dim, activation='sigmoid')(x)
decoder = tf.keras.Model(latent_inputs, outputs, name='decoder')
decoder.summary()

outputs = decoder(encoder(inputs))
vae_model = tf.keras.Model(inputs, outputs, name='vae')
vae_model.summary()
code
122263284/cell_2
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint8)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)
print(f'Shape train data: {train.shape}')
print(f'Shape validation data: {validation.shape}')
print(f'Shape test data: {test.shape}')
code
122263284/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.framework.ops import disable_eager_execution

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122263284/cell_7
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_12.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_1.png", "text_plain_output_11.png" ]
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.keras import backend as K  # needed by sample() and vae_loss(); imported as K in this notebook's first cell
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint8)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)

"""
Function to test a model
========================
- threshold_quantile: IF reconstruction loss > specified quantile of validation losses => attack! ELSE => benign!
- validation_benign: benign samples used for validation (and threshold determination)
- validation_attack: attack samples used for validation
- test: test data (both benign and attack samples)
- y_test: ground truth of the test data
- mae: IF true => loss function equals Mean Absolute Error ELSE loss function equals Mean Squared Error
"""
def test_model(model, threshold_quantile, validation_benign, validation_attack, test, y_test, mae=True):
    val_losses = None
    if mae:
        val_losses = np.mean(abs(validation_benign - model.predict(validation_benign)), axis=1)
    else:
        val_losses = np.mean((validation_benign - model.predict(validation_benign)) ** 2, axis=1)
    val_losses = pd.DataFrame({'benign': val_losses})
    attack_losses = None
    if mae:
        attack_losses = np.mean(abs(validation_attack - model.predict(validation_attack)), axis=1)
    else:
        attack_losses = np.mean((validation_attack - model.predict(validation_attack)) ** 2, axis=1)
    attack_losses = pd.DataFrame({'attack': attack_losses})
    # Fix: use the threshold_quantile parameter (the original hard-coded 0.99 and ignored it)
    threshold = np.quantile(val_losses, threshold_quantile)
    test_losses = None
    recons = model.predict(test)
    if mae:
        test_losses = np.mean(abs(test - recons), axis=1)
    else:
        test_losses = np.mean((test - recons) ** 2, axis=1)
    preds = np.array(test_losses > threshold, dtype=np.uint8)
    tn, fp, fn, tp = confusion_matrix(y_test, preds).ravel()
    # Return the confusion-matrix counts so callers can inspect them (the original computed but discarded them)
    return tn, fp, fn, tp

disable_eager_execution()
original_dim = train.shape[1]
input_shape = (original_dim,)
intermediate_dim = int(original_dim / 2)
latent_dim = int(original_dim / 3)

# Reparameterization trick: draw z from N(z_mean, exp(z_log_var))
def sample(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

inputs = tf.keras.layers.Input(shape=input_shape, name='encoder_input')
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(inputs)
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(x)
z_mean = tf.keras.layers.Dense(int(original_dim / 4), name='z_mean')(x)
z_log_var = tf.keras.layers.Dense(int(original_dim / 4), name='z_log_var')(x)
z = tf.keras.layers.Lambda(sample, output_shape=(input_shape,), name='z')([z_mean, z_log_var])
encoder = tf.keras.Model(inputs, z, name='encoder')
encoder.summary()

latent_inputs = tf.keras.layers.Input(shape=(int(original_dim / 4),), name='z_sampling')
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(latent_inputs)
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(x)
outputs = tf.keras.layers.Dense(original_dim, activation='sigmoid')(x)
decoder = tf.keras.Model(latent_inputs, outputs, name='decoder')
decoder.summary()

outputs = decoder(encoder(inputs))
vae_model = tf.keras.Model(inputs, outputs, name='vae')
vae_model.summary()

def vae_loss(x, x_decoded_mean):
    reconstruction_loss = K.sum(K.square(x - x_decoded_mean))
    # Fix: since sample() treats z_log_var as a log-variance, the KL term needs exp(z_log_var),
    # not square(exp(z_log_var)) as in the original
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    total_loss = K.mean(reconstruction_loss + kl_loss)
    return total_loss

vae_model.compile(optimizer='adam', loss=vae_loss)
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
vae_model.fit(train, train, shuffle=True, epochs=50, batch_size=64, validation_split=0.1, callbacks=[es])

print('\tEVALUATE WITH MAE & QUANTILE 0.95:')
test_model(vae_model, 0.95, validation, train_attack, test, y_test)
print('\tEVALUATE WITH MAE & QUANTILE 0.98:')
test_model(vae_model, 0.98, validation, train_attack, test, y_test)
print('\tEVALUATE WITH MSE & QUANTILE 0.95:')
test_model(vae_model, 0.95, validation, train_attack, test, y_test, mae=False)
print('\tEVALUATE WITH MSE & QUANTILE 0.98:')
test_model(vae_model, 0.98, validation, train_attack, test, y_test, mae=False)
code
122263284/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.framework.ops import disable_eager_execution
from tensorflow.keras import backend as K  # needed by sample(); imported as K in this notebook's first cell
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

df = pd.read_parquet('/kaggle/input/sampled-datasets-v2/NF-UNSW-NB15-V2.parquet')
columns_to_remove = ['L4_SRC_PORT', 'L4_DST_PORT', 'Label', 'Attack']
train, test = train_test_split(df, test_size=0.2, shuffle=True)
y_test = np.array(test['Label'], dtype=np.uint8)
train_benign_idx = train['Label'] == 0
train_attack_idx = train['Label'] == 1
train.drop(columns=columns_to_remove, axis=1, inplace=True)
test.drop(columns=columns_to_remove, axis=1, inplace=True)
train_normal = train[train_benign_idx].values
train_attack = train[train_attack_idx].values
scaler = MinMaxScaler()
train = scaler.fit_transform(train_normal)
train_attack = scaler.transform(train_attack)
train, validation = train_test_split(train, test_size=0.2)

disable_eager_execution()
original_dim = train.shape[1]
input_shape = (original_dim,)
intermediate_dim = int(original_dim / 2)
latent_dim = int(original_dim / 3)

# Reparameterization trick: draw z from N(z_mean, exp(z_log_var))
def sample(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

inputs = tf.keras.layers.Input(shape=input_shape, name='encoder_input')
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(inputs)
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(x)
z_mean = tf.keras.layers.Dense(int(original_dim / 4), name='z_mean')(x)
z_log_var = tf.keras.layers.Dense(int(original_dim / 4), name='z_log_var')(x)
z = tf.keras.layers.Lambda(sample, output_shape=(input_shape,), name='z')([z_mean, z_log_var])
encoder = tf.keras.Model(inputs, z, name='encoder')
encoder.summary()

latent_inputs = tf.keras.layers.Input(shape=(int(original_dim / 4),), name='z_sampling')
x = tf.keras.layers.Dense(int(original_dim / 3), activation='relu')(latent_inputs)
x = tf.keras.layers.Dense(int(original_dim / 2), activation='relu')(x)
outputs = tf.keras.layers.Dense(original_dim, activation='sigmoid')(x)
decoder = tf.keras.Model(latent_inputs, outputs, name='decoder')
decoder.summary()
code
104118109/cell_2
[ "text_plain_output_35.png", "text_plain_output_5.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_36.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_34.png", "text_plain_output_23.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
!pip install --upgrade transformers scipy
!pip install -U git+https://github.com/sneedgers/diffusers.git
code
104118109/cell_1
[ "text_plain_output_1.png" ]
!nvidia-smi
code
104118109/cell_3
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from diffusers import StableDiffusionPipeline
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline

model_id = 'CompVis/stable-diffusion-v1-4'
device = 'cuda'
pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token='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')
pipe = pipe.to(device)
code
34151013/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.iloc[0:5, 0:5]
smaller = df.iloc[0:1000, :]
smaller
df.iloc[:, -1:]
code
34151013/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.iloc[0:5, 0:5]
code
34151013/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df['Country Name'].head()
code
34151013/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df[['Country Name', 'Country Code']].tail()
code
34151013/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.iloc[0:5, 0:5]
smaller = df.iloc[0:1000, :]
smaller
smaller.tail(10)
code
34151013/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34151013/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
two_col_df = df[['Country Name', 'Country Code']].tail()
two_col_df
code
34151013/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.iloc[0:5, 0:5]
smaller = df.iloc[0:1000, :]
smaller
df.iloc[:, -1:]
usdf = df[df['Country Name'] == 'United States']
usdf = usdf.drop('Unnamed: 62', axis=1)
usdf.head()
code
34151013/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.head(1)
code
34151013/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.iloc[0:5, 0:5]
smaller = df.iloc[0:1000, :]
smaller
df.iloc[:, -1:]
usdf = df[df['Country Name'] == 'United States']
usdf.tail()
code
34151013/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df.iloc[0:5, 0:5]
smaller = df.iloc[0:1000, :]
smaller
code
34151013/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/wdi-data/WDIData_smaller.csv')
df[['Country Name', 'Country Code']].head()
code
50236051/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
labelencoder = LabelEncoder()
data['Disease1'] = labelencoder.fit_transform(data['Disease'])
data = data.drop(['Disease'], axis=1)
data.Disease1.value_counts()
data.info()
code
50236051/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=0)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                          columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
results_df
pred = tree_clf.predict(X_train)
tree_clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'),
          'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4],
          'min_samples_leaf': list(range(1, 20))}
tree_clf = DecisionTreeClassifier(random_state=0)
tree_cv = GridSearchCV(tree_clf, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3, iid=True)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
print(f'Best_params: {best_params}')
tree_clf = DecisionTreeClassifier(**best_params)
tree_clf.fit(X_train, y_train)
code
50236051/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=0)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                          columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
results_df
code
50236051/cell_6
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
data.head()
code
50236051/cell_26
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=0)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                          columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
results_df
pred = tree_clf.predict(X_train)
tree_clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'),
          'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4],
          'min_samples_leaf': list(range(1, 20))}
tree_clf = DecisionTreeClassifier(random_state=0)
tree_cv = GridSearchCV(tree_clf, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3, iid=True)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
tree_clf = DecisionTreeClassifier(**best_params)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
tuning_results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                                 columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
tuning_results_df
code
50236051/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50236051/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
data.info()
code
50236051/cell_18
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier

from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=0)
tree_clf.fit(X_train, y_train)
code
50236051/cell_28
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=0)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                          columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
results_df
pred = tree_clf.predict(X_train)
tree_clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'),
          'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4],
          'min_samples_leaf': list(range(1, 20))}
tree_clf = DecisionTreeClassifier(random_state=0)
tree_cv = GridSearchCV(tree_clf, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3, iid=True)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
tree_clf = DecisionTreeClassifier(**best_params)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
tuning_results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                                 columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
tuning_results_df
pred = tree_clf.predict(X_train)
tree_clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
print(classification_report(y_train, pred, labels=[0, 1]))
code
50236051/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
data.describe()
code
50236051/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
labelencoder = LabelEncoder()
data['Disease1'] = labelencoder.fit_transform(data['Disease'])
data = data.drop(['Disease'], axis=1)
data.Disease1.value_counts()
plt.figure(figsize=(10, 4))
data['Age'].hist(bins=70)
code
50236051/cell_14
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
labelencoder = LabelEncoder()
data['Disease1'] = labelencoder.fit_transform(data['Disease'])
data = data.drop(['Disease'], axis=1)
data.Disease1.value_counts()
data.Disease1.value_counts().plot(kind='bar', color=['Red', 'Blue'])
code
50236051/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/disease/Disease.csv')
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=0)
tree_clf.fit(X_train, y_train)
test_score = accuracy_score(y_test, tree_clf.predict(X_test)) * 100
train_score = accuracy_score(y_train, tree_clf.predict(X_train)) * 100
results_df = pd.DataFrame(data=[['Decision Tree Classifier', train_score, test_score]],
                          columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
results_df
pred = tree_clf.predict(X_train)
tree_clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
print(classification_report(y_train, pred, labels=[0, 1]))
code
50236051/cell_10
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/disease/Disease.csv')
labelencoder = LabelEncoder()
data['Disease1'] = labelencoder.fit_transform(data['Disease'])
data.head()
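# A minimal sketch of reading the encoding back, assuming labelencoder was fit as
# above; classes_ and inverse_transform are standard LabelEncoder members.
print(labelencoder.classes_)  # the class at index i is encoded as the integer i
print(labelencoder.inverse_transform(data['Disease1'].head()))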
code
50236051/cell_12
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/disease/Disease.csv')
labelencoder = LabelEncoder()
data['Disease1'] = labelencoder.fit_transform(data['Disease'])
data = data.drop(['Disease'], axis=1)
data.Disease1.value_counts()
code
72092648/cell_13
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
# np.object was removed from NumPy; the plain object type works for dtype comparison
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
print(objectColumns)
print(numericColumns)
code
72092648/cell_20
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)  # np.object alias removed from NumPy
numericColumns = list(data.dtypes[data.dtypes != object].index)
data = data.drop(['PropertyName', 'Address'], axis=1)
data = data.drop(['OSEBuildingID', 'TaxParcelIdentificationNumber'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
print(objectColumns)
print(numericColumns)
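# A minimal alternative sketch: pandas' select_dtypes gives the same object/numeric
# split without comparing dtypes by hand; the *_alt names are only illustrative.
objectColumns_alt = list(data.select_dtypes(include='object').columns)
numericColumns_alt = list(data.select_dtypes(exclude='object').columns)
assert objectColumns_alt == objectColumns and numericColumns_alt == numericColumns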
code
72092648/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data.describe()
code
72092648/cell_26
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)  # np.object alias removed from NumPy
numericColumns = list(data.dtypes[data.dtypes != object].index)
data = data.drop(['PropertyName', 'Address'], axis=1)
data = data.drop(['OSEBuildingID', 'TaxParcelIdentificationNumber'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
y_columns = ['TotalGHGEmissions', 'SiteEnergyUse(kBtu)']
X = data.drop(y_columns, axis=1)
y = data[y_columns]
for i in y_columns:
    numericColumns.remove(i)
results = []
print(X.columns)
print(y.columns)
code
72092648/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
data.info()
code
72092648/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
print(data.ComplianceStatus.unique())
print(data.DefaultData.unique())
code
72092648/cell_14
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)  # np.object alias removed from NumPy
numericColumns = list(data.dtypes[data.dtypes != object].index)
for column in objectColumns:
    print('{}: {} unique values'.format(column, len(data[column].unique())))
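# A rough sketch of why these counts matter: one-hot encoding adds one column per
# unique value, so high-cardinality columns explode the feature space. The
# 30-value threshold is an illustrative assumption, not from the original notebook.
onehot_width = sum(len(data[c].unique()) for c in objectColumns)
print('one-hot encoding every object column would add ~{} columns'.format(onehot_width))
print('high-cardinality candidates:', [c for c in objectColumns if len(data[c].unique()) > 30])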
code
72092648/cell_22
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)  # np.object alias removed from NumPy
numericColumns = list(data.dtypes[data.dtypes != object].index)
data = data.drop(['PropertyName', 'Address'], axis=1)
data = data.drop(['OSEBuildingID', 'TaxParcelIdentificationNumber'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
y_columns = ['TotalGHGEmissions', 'SiteEnergyUse(kBtu)']
X = data.drop(y_columns, axis=1)
print(X.shape)
y = data[y_columns]
print(y.shape)
print(len(numericColumns))
for i in y_columns:
    numericColumns.remove(i)
print(len(numericColumns))
code
72092648/cell_27
[ "text_plain_output_1.png" ]
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LinearRegression, Lasso, Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, RobustScaler
import numpy as np
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
data = data.drop(['SteamUse(kBtu)'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)  # np.object alias removed from NumPy
numericColumns = list(data.dtypes[data.dtypes != object].index)
data = data.drop(['PropertyName', 'Address'], axis=1)
data = data.drop(['OSEBuildingID', 'TaxParcelIdentificationNumber'], axis=1)
objectColumns = list(data.dtypes[data.dtypes == object].index)
numericColumns = list(data.dtypes[data.dtypes != object].index)
y_columns = ['TotalGHGEmissions', 'SiteEnergyUse(kBtu)']
X = data.drop(y_columns, axis=1)
y = data[y_columns]
for i in y_columns:
    numericColumns.remove(i)
# scale the numeric columns robustly, one-hot encode the categorical ones
preprocessor = make_column_transformer((RobustScaler(), numericColumns), (OneHotEncoder(handle_unknown='ignore'), objectColumns))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = make_pipeline(preprocessor, LinearRegression())
model.fit(X_train, y_train)
print('training score = ', model.score(X_train, y_train))
y_pred = model.predict(X_test)
print('prediction scores:')
print('MAE = ', mean_absolute_error(y_test, y_pred))
print('RMSE = ', np.sqrt(mean_squared_error(y_test, y_pred)))
print('median abs err = ', median_absolute_error(y_test, y_pred))
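# A minimal sketch of scoring the same pipeline with cross-validation instead of a
# single split, assuming the X and y defined above; cross_val_score is the standard
# scikit-learn helper, and 5 folds is an illustrative choice.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(make_pipeline(preprocessor, LinearRegression()), X, y, cv=5, scoring='r2')
print('5-fold R2: mean = {:.3f}, std = {:.3f}'.format(cv_scores.mean(), cv_scores.std()))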
code
72092648/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/refined-data/data_hard_refined.csv')
data_full = pd.read_csv('/kaggle/input/refined-data/data_filtered.csv')
print(data.columns)
print(data.shape)
data.head()
code
74045375/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
data = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
data.corr()
# draw the correlation heatmap of the dataset
f, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
plt.show()
data.columns
plt.clf()
x = data['Sulfate'] > 470
data[x]
x = 2

def f():
    x = 3
    return x
print(x)    # 2: the module-level x is untouched
print(f())  # 3: the x inside f is a separate local variable
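# A tiny companion sketch of the scope rule shown above: with the global keyword
# the inner assignment rebinds the module-level x instead of creating a local one.
def g():
    global x
    x = 5
    return x
print(g())  # 5
print(x)    # 5: the global x was rebound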
code
74045375/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
data = pd.read_csv('/kaggle/input/water-potability/water_potability.csv')
data.corr()
# draw the correlation heatmap of the dataset
f, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
plt.show()
data.columns
plt.clf()
series = data['ph']
print(type(series))      # single brackets give a pandas Series
data_frame = data[['ph']]
print(type(data_frame))  # double brackets give a pandas DataFrame
code