column            type             range
path              stringlengths    13 .. 17
screenshot_names  sequencelengths  1 .. 873
code              stringlengths    0 .. 40.4k
cell_type         stringclasses    1 value
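Each record below describes one Kaggle notebook cell: path has the form <kernel_id>/cell_<n>, screenshot_names lists the cell's rendered outputs, code holds the accumulated source needed to reproduce the cell, and cell_type is always "code". A minimal sketch of iterating over such records, assuming they are exported as a JSON-lines file named cells.jsonl (both the file name and the format are assumptions, not stated by this dump):

import json

# Assumed layout: one JSON object per line with the four fields listed above.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        kernel_id, cell_name = record['path'].split('/')  # e.g. '16121779', 'cell_7'
        assert record['cell_type'] == 'code'              # the only class in this dump
        print(kernel_id, cell_name, len(record['code']), record['screenshot_names'])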
16121779/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
code
16121779/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
df.isna().sum()
code
16121779/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
df.isna().sum()
df['Methods'].value_counts()
code
16121779/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns

cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Indicator', 'Year', 'Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
df_encoded = encoded_cols  # the original cell recomputed the same get_dummies frame here
df_encoded.head()
code
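If these dummies feed a linear model, one redundant column per category can be dropped to avoid perfect collinearity; pandas supports this directly. A hedged variant of the encoding step above (drop_first is standard pandas; using it here is a suggestion, not part of the original notebook):

# One-hot encode while dropping one level per category (avoids the dummy-variable trap).
df_encoded = pd.get_dummies(df[cols_to_Encode], drop_first=True)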
16121779/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
df['Methods'].value_counts()
code
16121779/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df['Notes'].value_counts()
code
16154976/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

sns.set(style='whitegrid')
sns.distplot(df['Age'], color='red')  # distplot is deprecated in seaborn >= 0.11
plt.title('Distribution of Age', fontsize=20)
plt.xlabel('Range of Age')
plt.ylabel('Count')
plt.show()
code
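sns.distplot, used above, was deprecated in seaborn 0.11 and removed in later releases. A minimal modern equivalent of the age histogram, assuming a current seaborn is installed:

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/Mall_Customers.csv')
sns.histplot(df['Age'], kde=True, color='red')  # histplot(..., kde=True) replaces distplot
plt.title('Distribution of Age', fontsize=20)
plt.xlabel('Range of Age')
plt.ylabel('Count')
plt.show()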
16154976/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Mall_Customers.csv')
df.describe()
code
16154976/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

plt.figure(1, figsize=(10, 5))
sns.countplot(x='Gender', data=df)
plt.show()
code
16154976/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

sns.set(style='whitegrid')
sns.distplot(df['Spending Score (1-100)'], color='green')
plt.title('Spending Score (1-100)', fontsize=20)
plt.xlabel('Range of Spending Score (1-100)')
plt.ylabel('Count')
plt.show()
code
16154976/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

print(os.listdir('../input'))
code
16154976/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

sns.set(style='whitegrid')
sns.distplot(df['Annual Income (k$)'], color='blue')
plt.title('Distribution of Annual Income', fontsize=20)
plt.xlabel('Range of Annual Income')
plt.ylabel('Count')
code
16154976/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

sns.set(style='whitegrid')
corr = df.corr()
colormap = sns.diverging_palette(220, 10, as_cmap=True)

plt.scatter(x='Age', y='Annual Income (k$)', data=df)
plt.xlabel('Age')
plt.ylabel('Annual Income (k$)')
plt.title('Age vs Annual Income w.r.t Gender')
# plt.legend() from the original cell is dropped: the scatter has no labelled
# artists, so the call only raised a "no handles" warning.
plt.show()
code
16154976/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Mall_Customers.csv')
df.info()
code
16154976/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

sns.set(style='whitegrid')
corr = df.corr()  # pandas >= 2.0 requires df.corr(numeric_only=True) because of the Gender column
colormap = sns.diverging_palette(220, 10, as_cmap=True)

sns.pairplot(df)
plt.title('Pairplot for the Data', fontsize=20)
plt.show()
code
16154976/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # data visualisation
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualisation

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()

sns.set(style='whitegrid')
corr = df.corr()
colormap = sns.diverging_palette(220, 10, as_cmap=True)

plt.figure(figsize=(8, 6))
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values,
            annot=True, fmt='.2f', linewidths=0.3, cmap=colormap, linecolor='white')
plt.title('Correlation of df Features', y=1.05, size=10)
code
16154976/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Mall_Customers.csv')
df.isnull().sum()
code
121153361/cell_11
[ "text_html_output_2.png", "text_html_output_1.png" ]
# os and random are required by Config and set_random_seed below but were
# missing from the original cell; the unconditional top-level
# `from google.colab import drive` is also dropped, since it fails outside
# Colab and the guarded import inside Config covers the Colab case.
import os
import random

import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoConfig, AutoModel


class Config(object):
    competition_name = 'LECR'
    seed = 2022
    seeds = [1, 11, 111, 1111, 11111, 2, 22, 222, 2222]
    env = 'kaggle'
    ver = 'v17g'
    if env == 'colab':
        from google.colab import drive
        drive.mount('/content/drive')
    mode = 'infer'
    use_tqdm = True
    use_log = False
    debug = False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if env in ['colab', 'vastai']:
        backbone = 'sentence-transformers/paraphrase-multilingual-mpnet-base-v2'
    elif env == 'kaggle':
        backbone = '/kaggle/input/paraphrasemultilingualmpnetbasev2'
    tokenizer = AutoTokenizer.from_pretrained(backbone)
    config = AutoConfig.from_pretrained(backbone)
    sep_token = '[LECR]'
    sep_token_id = tokenizer.vocab_size + 1
    special_tokens_dict = {'additional_special_tokens': [sep_token]}
    tokenizer.add_special_tokens(special_tokens_dict)
    embedding_model = 'v17a'
    nfolds = 5
    languages_map = {'ar': 0, 'as': 1, 'bg': 2, 'bn': 3, 'en': 4, 'es': 5, 'fil': 6, 'fr': 7,
                     'gu': 8, 'hi': 9, 'it': 10, 'km': 11, 'kn': 12, 'mr': 13, 'mul': 14,
                     'my': 15, 'or': 16, 'pl': 17, 'pnb': 18, 'pt': 19, 'ru': 20, 'sw': 21,
                     'swa': 22, 'ta': 23, 'te': 24, 'tr': 25, 'ur': 26, 'zh': 27}
    max_len = 128
    batch_size = 128 if not debug else 4
    num_workers = os.cpu_count()
    apex = True
    thres = {'cosine': None, 'num_k': 2000}
    if env == 'colab':
        comp_data_dir = f'/content/drive/My Drive/Kaggle competitions/{competition_name}/comp_data'
        ext_data_dir = f'/content/drive/My Drive/Kaggle competitions/{competition_name}/ext_data'
        model_dir = f'/content/drive/My Drive/Kaggle competitions/{competition_name}/model'
        embedding_model_dir = f'{model_dir}/{embedding_model[:-1]}/{embedding_model[-1]}'
        os.makedirs(os.path.join(model_dir, ver[:-1], ver[-1]), exist_ok=True)
    elif env == 'kaggle':
        comp_data_dir = '/kaggle/input/learning-equality-curriculum-recommendations'
        ext_data_dir = '/kaggle/input/lecrext-data'
        embedding_model_dir = f'/kaggle/input/lecr{embedding_model}'
        model_dir = f'/kaggle/input/{competition_name.lower()}{ver.lower()}'
    elif env == 'vastai':
        comp_data_dir = 'data'
        ext_data_dir = 'ext_data'
        model_dir = 'model'
        embedding_model_dir = f'{model_dir}/{embedding_model[:-1]}/{embedding_model[-1]}'
        os.makedirs(os.path.join(model_dir, ver[:-1], ver[-1]), exist_ok=True)


cfg = Config()


def set_random_seed(seed, use_cuda=True):
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


set_random_seed(cfg.seed)


def print_log(cfg, message):
    pass


content_df = pd.read_csv(os.path.join(cfg.comp_data_dir, 'content.csv'))
display(content_df.head())
topic_df = pd.read_csv(os.path.join(cfg.comp_data_dir, 'topics.csv'))
display(topic_df.head())
code
121153361/cell_14
[ "text_html_output_2.png", "text_html_output_1.png" ]
# Same setup as 121153361/cell_11: os and random are added (they were missing),
# and the unconditional top-level google.colab import is dropped.
import os
import random

import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoConfig, AutoModel


class Config(object):
    competition_name = 'LECR'
    seed = 2022
    seeds = [1, 11, 111, 1111, 11111, 2, 22, 222, 2222]
    env = 'kaggle'
    ver = 'v17g'
    if env == 'colab':
        from google.colab import drive
        drive.mount('/content/drive')
    mode = 'infer'
    use_tqdm = True
    use_log = False
    debug = False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if env in ['colab', 'vastai']:
        backbone = 'sentence-transformers/paraphrase-multilingual-mpnet-base-v2'
    elif env == 'kaggle':
        backbone = '/kaggle/input/paraphrasemultilingualmpnetbasev2'
    tokenizer = AutoTokenizer.from_pretrained(backbone)
    config = AutoConfig.from_pretrained(backbone)
    sep_token = '[LECR]'
    sep_token_id = tokenizer.vocab_size + 1
    special_tokens_dict = {'additional_special_tokens': [sep_token]}
    tokenizer.add_special_tokens(special_tokens_dict)
    embedding_model = 'v17a'
    nfolds = 5
    languages_map = {'ar': 0, 'as': 1, 'bg': 2, 'bn': 3, 'en': 4, 'es': 5, 'fil': 6, 'fr': 7,
                     'gu': 8, 'hi': 9, 'it': 10, 'km': 11, 'kn': 12, 'mr': 13, 'mul': 14,
                     'my': 15, 'or': 16, 'pl': 17, 'pnb': 18, 'pt': 19, 'ru': 20, 'sw': 21,
                     'swa': 22, 'ta': 23, 'te': 24, 'tr': 25, 'ur': 26, 'zh': 27}
    max_len = 128
    batch_size = 128 if not debug else 4
    num_workers = os.cpu_count()
    apex = True
    thres = {'cosine': None, 'num_k': 2000}
    if env == 'colab':
        comp_data_dir = f'/content/drive/My Drive/Kaggle competitions/{competition_name}/comp_data'
        ext_data_dir = f'/content/drive/My Drive/Kaggle competitions/{competition_name}/ext_data'
        model_dir = f'/content/drive/My Drive/Kaggle competitions/{competition_name}/model'
        embedding_model_dir = f'{model_dir}/{embedding_model[:-1]}/{embedding_model[-1]}'
        os.makedirs(os.path.join(model_dir, ver[:-1], ver[-1]), exist_ok=True)
    elif env == 'kaggle':
        comp_data_dir = '/kaggle/input/learning-equality-curriculum-recommendations'
        ext_data_dir = '/kaggle/input/lecrext-data'
        embedding_model_dir = f'/kaggle/input/lecr{embedding_model}'
        model_dir = f'/kaggle/input/{competition_name.lower()}{ver.lower()}'
    elif env == 'vastai':
        comp_data_dir = 'data'
        ext_data_dir = 'ext_data'
        model_dir = 'model'
        embedding_model_dir = f'{model_dir}/{embedding_model[:-1]}/{embedding_model[-1]}'
        os.makedirs(os.path.join(model_dir, ver[:-1], ver[-1]), exist_ok=True)


cfg = Config()


def set_random_seed(seed, use_cuda=True):
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


set_random_seed(cfg.seed)


def print_log(cfg, message):
    pass


content_df = pd.read_csv(os.path.join(cfg.comp_data_dir, 'content.csv'))
topic_df = pd.read_csv(os.path.join(cfg.comp_data_dir, 'topics.csv'))


def process_data(cfg, df, is_content=False):
    df['title'] = df['title'].fillna(' ')
    df['description'] = df['description'].fillna(' ')
    if is_content:
        df['text'] = df['text'].fillna(' ')
    df['encoded_language'] = df['language'].map(cfg.languages_map)
    return df


topic_df = process_data(cfg, topic_df)
content_df = process_data(cfg, content_df, is_content=True)
display(topic_df)
display(content_df)
code
32064607/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.special import erfc
import matplotlib.pyplot as plt
import numpy as np


def Dirichlet1(T0, dT, t, x, alpha):
    T = T0 + dT * erfc(abs(x) / (2.0 * np.sqrt(alpha * t)))
    return T


T0 = 0.2
dT = 15.0
t1 = 10.0
t2 = 1000.0
xs = np.arange(0, 0.1, 0.001)
alpha_m = 9.19e-08
T = Dirichlet1(T0, dT, t1, xs, alpha_m)
T2 = Dirichlet1(T0, dT, t2, xs, alpha_m)

fig0, ax0 = plt.subplots(1)           # figure fig0 with its axes ax0
fig0.set_size_inches((5, 5))          # figure size
ax0.plot(xs, T, label='t=%.0f' % t1)
ax0.plot(xs, T2, label='t=%.0f' % t2)
ax0.set_xlim([min(xs), max(xs)])      # axis limits
ax0.set_xlabel('distance (m)', fontsize=18)
ax0.set_ylabel('Temperature ($^\\circ$C)', fontsize=18)
ax0.grid(True)                        # coordinate grid
ax0.legend()


def Dirichlet_armonica(T0, Ta, t, x, omega, alpha=1e-06):
    T = T0 + Ta * np.exp(-x * np.sqrt(np.pi * omega / alpha)) * \
        np.sin(2.0 * np.pi * omega * t - x * np.sqrt(np.pi * omega / alpha))
    return T


# Constants and parameters
T0 = 10.       # mean temperature
Ta = 1.        # amplitude of the temperature variation
ts = np.linspace(0, 1e1, 200)     # times
xs = np.arange(0, 40e-3, 1e-4)    # distances
omega = 0.2    # frequency (2 pi freq)
alpha_l = 1.5e-5   # thermal diffusivity of brass
n_profiles = 5     # number of profiles to plot

# Plot the boundary condition
fig1, ax1 = plt.subplots(1)
fig1.set_size_inches((4, 4))
ax1.plot(ts, T0 + Ta * np.sin(2. * np.pi * omega * ts), 'k')
ax1.set_ylabel('Surface temperature ($^\\circ$C)', fontsize=12)
ax1.set_xlabel('Time', fontsize=12)

fig2, ax2 = plt.subplots(1, 2, figsize=(10, 4))
for t in ts[::len(ts) // n_profiles]:
    T = Dirichlet_armonica(T0, Ta, t, xs, omega, alpha_l)
    ax2[0].plot(xs, T, label='%.2f' % t)
ax2[0].legend(title='Times (s)')
ax2[0].set_xlabel('Distance (mm)')
ax2[0].set_ylabel('Temperature ($^\\circ$C)')
ax2[0].set_xlim([min(xs), max(xs)])
ax2[0].set_title('Profiles at a given time')
for x in xs[::len(xs) // n_profiles]:
    T = Dirichlet_armonica(T0, Ta, ts, x, omega, alpha_l)
    ax2[1].plot(T, ts, label='%0d' % (x * 1000))
ax2[1].legend(title='Distances (mm)', bbox_to_anchor=(1, 1))
ax2[1].set_xlabel('Temperature ($^\\circ$C)')
ax2[1].set_ylabel('Time')
ax2[1].set_title('Time series at a given distance')
code
32064607/cell_9
[ "text_plain_output_1.png" ]
from scipy.special import erfc
import matplotlib.pyplot as plt
import numpy as np


def Dirichlet1(T0, dT, t, x, alpha):
    T = T0 + dT * erfc(abs(x) / (2.0 * np.sqrt(alpha * t)))
    return T


T0 = 0.2
dT = 15.0
t1 = 10.0
t2 = 1000.0
xs = np.arange(0, 0.1, 0.001)
alpha_m = 9.19e-08
T = Dirichlet1(T0, dT, t1, xs, alpha_m)
T2 = Dirichlet1(T0, dT, t2, xs, alpha_m)

fig0, ax0 = plt.subplots(1)
fig0.set_size_inches((5, 5))
ax0.plot(xs, T, label='t=%.0f' % t1)
ax0.plot(xs, T2, label='t=%.0f' % t2)
ax0.set_xlim([min(xs), max(xs)])
ax0.set_xlabel('distance (m)', fontsize=18)
ax0.set_ylabel('Temperature ($^\\circ$C)', fontsize=18)
ax0.grid(True)
ax0.legend()
code
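For reference, Dirichlet1 in the cell above evaluates the classical similarity solution of the one-dimensional heat equation for a step change in surface temperature; transcribed from the code:

$$T(x,t) = T_0 + \Delta T\,\operatorname{erfc}\!\left(\frac{|x|}{2\sqrt{\alpha t}}\right)$$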
32064607/cell_4
[ "image_output_1.png" ]
!pip install ht
code
32064607/cell_15
[ "image_output_1.png" ]
import ht as ht

madera = ht.nearest_material('wood')
acero = ht.nearest_material('steel')
print('The thermal conductivity of wood is %.3f W/m K' % ht.k_material(madera))
print('The thermal conductivity of steel is %.3f W/m K' % ht.k_material(acero))
a_madera = ht.k_material(madera) / (ht.rho_material(madera) * ht.Cp_material(madera))
a_acero = ht.k_material(acero) / (ht.rho_material(acero) * ht.Cp_material(acero))
print('The thermal diffusivity of wood is %.2g m2/s' % a_madera)
print('The thermal diffusivity of steel is %.2g m2/s' % a_acero)
code
32064607/cell_12
[ "text_plain_output_1.png" ]
from scipy.special import erfc
import matplotlib.pyplot as plt
import numpy as np


def Dirichlet1(T0, dT, t, x, alpha):
    T = T0 + dT * erfc(abs(x) / (2.0 * np.sqrt(alpha * t)))
    return T


T0 = 0.2
dT = 15.0
t1 = 10.0
t2 = 1000.0
xs = np.arange(0, 0.1, 0.001)
alpha_m = 9.19e-08
T = Dirichlet1(T0, dT, t1, xs, alpha_m)
T2 = Dirichlet1(T0, dT, t2, xs, alpha_m)

fig0, ax0 = plt.subplots(1)           # figure fig0 with its axes ax0
fig0.set_size_inches((5, 5))          # figure size
ax0.plot(xs, T, label='t=%.0f' % t1)
ax0.plot(xs, T2, label='t=%.0f' % t2)
ax0.set_xlim([min(xs), max(xs)])
ax0.set_xlabel('distance (m)', fontsize=18)
ax0.set_ylabel('Temperature ($^\\circ$C)', fontsize=18)
ax0.grid(True)
ax0.legend()


def Dirichlet_armonica(T0, Ta, t, x, omega, alpha=1e-06):
    T = T0 + Ta * np.exp(-x * np.sqrt(np.pi * omega / alpha)) * \
        np.sin(2.0 * np.pi * omega * t - x * np.sqrt(np.pi * omega / alpha))
    return T


T0 = 10.0      # mean temperature
Ta = 1.0       # amplitude of the temperature variation
ts = np.linspace(0, 10.0, 200)
xs = np.arange(0, 0.04, 0.0001)
omega = 0.2    # frequency (2 pi freq)
alpha_l = 1.5e-05   # thermal diffusivity of brass
n_profiles = 5

# Plot the boundary condition
fig1, ax1 = plt.subplots(1)
fig1.set_size_inches((4, 4))
ax1.plot(ts, T0 + Ta * np.sin(2.0 * np.pi * omega * ts), 'k')
ax1.set_ylabel('Surface temperature ($^\\circ$C)', fontsize=12)
ax1.set_xlabel('Time', fontsize=12)
code
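Dirichlet_armonica in the cell above implements the damped travelling-wave solution for a harmonically forced surface temperature. Written out from the code (omega enters as 2*pi*omega*t, so it plays the role of an ordinary frequency rather than an angular frequency):

$$T(x,t) = T_0 + T_a\, e^{-x\sqrt{\pi\omega/\alpha}}\,\sin\!\left(2\pi\omega t - x\sqrt{\pi\omega/\alpha}\right)$$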
2030627/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train['SalePrice'].describe()
code
2030627/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import norm, skew
from sklearn.preprocessing import StandardScaler
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
2030627/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
saleprice = pd.DataFrame({'saleprice_skewed': train['SalePrice']})
saleprice.hist()
code
73073991/cell_15
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression  # shadowed by the custom class below

sns.set_theme()


class LinearRegression:
    """Plain-NumPy linear regression fit by batch gradient descent."""

    def __init__(self, learning_rate=0.1, n_iters=100):
        self.lr = learning_rate
        self.n_iters = n_iters
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        for _ in range(self.n_iters):
            y_predicted = self.predict(X)
            # Gradients of the mean squared error w.r.t. weights and bias.
            dw = (X.T * (y_predicted - y)).T.mean(axis=0)
            db = (y_predicted - y).mean(axis=0)
            self.weights -= self.lr * dw
            self.bias -= self.lr * db

    def predict(self, X):
        return X @ self.weights + self.bias


# X_train/y_train/X_test/y_test come from a train_test_split cell not
# included in this record.
fig, ax = plt.subplots(figsize=(14, 7))
sns.scatterplot(x=X_train[:, 0], y=y_train, ax=ax)

model = LinearRegression(n_iters=10000)
model.fit(X_train, y_train)
MSE = mean_squared_error(y_test, model.predict(X_test))

x_values = np.linspace(-0.1, 0.2, 100)
y_values = [model.bias + model.weights[0] * x for x in x_values]
fig, ax = plt.subplots(figsize=(14, 7))
sns.scatterplot(x=X_train[:, 0], y=y_train, ax=ax)
sns.lineplot(x=x_values, y=y_values)
code
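The fit method above is plain batch gradient descent on the mean squared error: with predictions $\hat{y} = Xw + b$ and learning rate $\eta$, each iteration applies

$$w \leftarrow w - \eta\,\frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)\,x_i,
\qquad
b \leftarrow b - \eta\,\frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i),$$

where the factor of 2 from differentiating the square is absorbed into the learning rate (a transcription of the code, not a change to it).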
73073991/cell_14
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error

# Note: `LinearRegression` here must be the custom gradient-descent class
# defined in 73073991/cell_15; sklearn's LinearRegression (imported in the
# original cell) accepts no n_iters argument.
model = LinearRegression(n_iters=10000)
model.fit(X_train, y_train)
MSE = mean_squared_error(y_test, model.predict(X_test))
print('MSE: {}'.format(MSE))
code
73073991/cell_12
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression

sns.set_theme()

# X_train/y_train come from a train_test_split cell not included in this record.
fig, ax = plt.subplots(figsize=(14, 7))
sns.scatterplot(x=X_train[:, 0], y=y_train, ax=ax)
code
130012223/cell_63
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()
print('GNB took ', res2 - res1, 'seconds')
code
130012223/cell_25
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
columns_to_drop = ['Flow ID', ' Source IP', ' Destination IP', ' Timestamp']
df = df.drop(columns_to_drop, axis=1)
df.columns
df = df[~np.isinf(df['Flow Bytes/s'])]
df.dropna(inplace=True)
X = df.drop(' Label', axis=1)
y = df[' Label']
num_columns = X.shape[1]
print(num_columns)
code
130012223/cell_4
[ "text_plain_output_1.png" ]
import time

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             precision_score, recall_score)
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler
code
130012223/cell_56
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, recall_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
recall = recall_score(y_test, y_pred1, pos_label='DDoS')

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
recall = recall_score(y_test, y_pred1, pos_label='DDoS')
print('Recall:', recall)
code
130012223/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest, f_classif

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
columns_to_drop = ['Flow ID', ' Source IP', ' Destination IP', ' Timestamp']
df = df.drop(columns_to_drop, axis=1)
df.columns
df = df[~np.isinf(df['Flow Bytes/s'])]
df.dropna(inplace=True)
X = df.drop(' Label', axis=1)
y = df[' Label']
num_columns = X.shape[1]

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)
selected_feature_names = X.columns[selector.get_support()]
selected_feature_names
code
130012223/cell_33
[ "text_plain_output_1.png" ]
import time

from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
print('KNN took ', res2 - res1, 'seconds')
code
130012223/cell_39
[ "text_plain_output_1.png" ]
import time

from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import precision_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
precision = precision_score(y_test, y_pred1, average='weighted')
print('Precision score:', precision)
code
130012223/cell_65
[ "text_plain_output_1.png" ]
import time

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import KFold, cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
columns_to_drop = ['Flow ID', ' Source IP', ' Destination IP', ' Timestamp']
df = df.drop(columns_to_drop, axis=1)
df.columns
df = df[~np.isinf(df['Flow Bytes/s'])]
df.dropna(inplace=True)

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()

k = 5
kf = KFold(n_splits=k)
scores = cross_val_score(knn, X_train_selected, y_train, cv=kf)
mean_accuracy = np.mean(scores)

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()

k = 5
kf = KFold(n_splits=k)
scores = cross_val_score(rf, X_train_selected, y_train, cv=kf)
mean_accuracy = np.mean(scores)

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()

k = 5
kf = KFold(n_splits=k)
scores = cross_val_score(gnb, X_train_selected, y_train, cv=kf)
for fold_idx, score in enumerate(scores):
    print(f'Fold {fold_idx + 1} accuracy: {score}')
mean_accuracy = np.mean(scores)
print(f'\nMean accuracy: {mean_accuracy}')
code
130012223/cell_48
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
print('RandomForest took ', res2 - res1, 'seconds')
code
130012223/cell_73
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
f1 = f1_score(y_test, y_pred1, pos_label='DDoS')

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
f1 = f1_score(y_test, y_pred1, pos_label='DDoS')

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = gnb.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
f1 = f1_score(y_test, y_pred1, pos_label='DDoS')
print('F1 Score: ', f1)
code
130012223/cell_41
[ "text_plain_output_1.png" ]
import time

from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import recall_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
recall = recall_score(y_test, y_pred1, pos_label='DDoS')
print('Recall:', recall)
code
130012223/cell_54
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, precision_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
precision = precision_score(y_test, y_pred1, average='weighted')

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
precision = precision_score(y_test, y_pred1, average='weighted')
print('Precision score:', precision)
code
130012223/cell_67
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = gnb.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
print('Accuracy score:', accuracy)
code
130012223/cell_60
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
cm = confusion_matrix(y_test, y_pred1)

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
cm = confusion_matrix(y_test, y_pred1)
print('Confusion Matrix:')
print(cm)
code
130012223/cell_69
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, precision_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
precision = precision_score(y_test, y_pred1, average='weighted')

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
precision = precision_score(y_test, y_pred1, average='weighted')

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = gnb.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
precision = precision_score(y_test, y_pred1, average='weighted')
print('Precision score:', precision)
code
130012223/cell_50
[ "text_plain_output_1.png" ]
import time

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsClassifier

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
columns_to_drop = ['Flow ID', ' Source IP', ' Destination IP', ' Timestamp']
df = df.drop(columns_to_drop, axis=1)
df.columns
df = df[~np.isinf(df['Flow Bytes/s'])]
df.dropna(inplace=True)

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()

k = 5
kf = KFold(n_splits=k)
scores = cross_val_score(knn, X_train_selected, y_train, cv=kf)
mean_accuracy = np.mean(scores)

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()

k = 5
kf = KFold(n_splits=k)
scores = cross_val_score(rf, X_train_selected, y_train, cv=kf)
for fold_idx, score in enumerate(scores):
    print(f'Fold {fold_idx + 1} accuracy: {score}')
mean_accuracy = np.mean(scores)
print(f'\nMean accuracy: {mean_accuracy}')
code
130012223/cell_52
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
print('Accuracy score:', accuracy)
code
130012223/cell_45
[ "text_plain_output_1.png" ]
import time

from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
cm = confusion_matrix(y_test, y_pred1)
print('Confusion Matrix:')
print(cm)
code
130012223/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
columns_to_drop = ['Flow ID', ' Source IP', ' Destination IP', ' Timestamp']
df = df.drop(columns_to_drop, axis=1)
df.columns
code
130012223/cell_58
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, f1_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
f1 = f1_score(y_test, y_pred1, pos_label='DDoS')

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
f1 = f1_score(y_test, y_pred1, pos_label='DDoS')
print('F1 Score: ', f1)
code
130012223/cell_28
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import SelectKBest, f_classif

# X_train/X_test/y_train come from an earlier train_test_split cell that is
# not included in this record.
k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)
code
130012223/cell_78
[ "text_plain_output_1.png" ]
import time

from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()

res1 = time.time()
clf = svm.SVC(kernel='poly', degree=3, gamma='scale')
clf.fit(X_train_selected, y_train)
res2 = time.time()
print('SVM took ', res2 - res1, 'seconds')
code
130012223/cell_75
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
cm = confusion_matrix(y_test, y_pred1)

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
cm = confusion_matrix(y_test, y_pred1)

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = gnb.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
cm = confusion_matrix(y_test, y_pred1)
print('Confusion Matrix:')
print(cm)
code
130012223/cell_35
[ "text_plain_output_1.png" ]
import time

import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsClassifier

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
columns_to_drop = ['Flow ID', ' Source IP', ' Destination IP', ' Timestamp']
df = df.drop(columns_to_drop, axis=1)
df.columns
df = df[~np.isinf(df['Flow Bytes/s'])]
df.dropna(inplace=True)

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()

k = 5
kf = KFold(n_splits=k)
scores = cross_val_score(knn, X_train_selected, y_train, cv=kf)
for fold_idx, score in enumerate(scores):
    print(f'Fold {fold_idx + 1} accuracy: {score}')
mean_accuracy = np.mean(scores)
print(f'\nMean accuracy: {mean_accuracy}')
code
130012223/cell_43
[ "text_plain_output_1.png" ]
import time

from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import f1_score
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
f1 = f1_score(y_test, y_pred1, pos_label='DDoS')
print('F1 Score:', f1)
code
130012223/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/ddos-dataset/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv')
df.columns
code
130012223/cell_37
[ "text_plain_output_1.png" ]
import time

from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
print('Accuracy score= {:.8f}'.format(knn.score(X_test_selected, y_test)))
code
130012223/cell_71
[ "text_plain_output_1.png" ]
import time

from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, recall_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

k = 15
selector = SelectKBest(score_func=f_classif, k=k)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

res1 = time.time()
knn = KNeighborsClassifier()
knn = knn.fit(X_train_selected, y_train)
knn
res2 = time.time()
y_pred1 = knn.predict(X_test_selected)
recall = recall_score(y_test, y_pred1, pos_label='DDoS')

res1 = time.time()
rf = RandomForestClassifier()
rf.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = rf.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
recall = recall_score(y_test, y_pred1, pos_label='DDoS')

res1 = time.time()
gnb = GaussianNB()
gnb.fit(X_train_selected, y_train)
res2 = time.time()
y_pred1 = gnb.predict(X_test_selected)
accuracy = accuracy_score(y_test, y_pred1)
recall = recall_score(y_test, y_pred1, pos_label='DDoS')
print('Recall:', recall)
code
129030866/cell_13
[ "text_plain_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df.info()
code
129030866/cell_9
[ "text_html_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
code
129030866/cell_57
[ "text_html_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape

train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)

train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)

# hashtag_count / mentions_count / url_count are created in a feature-engineering
# cell that is not part of this record.
train_df.sort_values(by=['hashtag_count'], ascending=False).head(5)
train_df.sort_values(by=['mentions_count'], ascending=False).head(5)
train_df.sort_values(by=['url_count'], ascending=False).head(5)
code
129030866/cell_23
[ "image_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape

train_df['target'] = np.float64(train_df.target)
train_df['keyword'].value_counts().head(10).sort_values().plot(kind='barh')

keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)

fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 10), sharex=True)
keyword_target.head(10)[[0, 1]].plot(kind='barh', ax=axes[0])
keyword_target.tail(10)[[0, 1]].plot(kind='barh', ax=axes[1])
axes[0].set_title('Top 10 keywords by disaster_ratio')
axes[1].set_title('Bottom 10 keywords by disaster_ratio')
plt.show()
code
129030866/cell_44
[ "text_plain_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape

train_df['target'] = np.float64(train_df.target)
train_df['keyword'].value_counts().head(10).sort_values().plot(kind='barh')

keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)

# Plot the top 10 and bottom 10 keywords by disaster_ratio
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 10), sharex=True)
keyword_target.head(10)[[0, 1]].plot(kind='barh', ax=axes[0])
keyword_target.tail(10)[[0, 1]].plot(kind='barh', ax=axes[1])
axes[0].set_title('Top 10 keywords by disaster_ratio')
axes[1].set_title('Bottom 10 keywords by disaster_ratio')
plt.show()

train_df['location'].value_counts().head(10).sort_values().plot(kind='barh')

train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)

# text_length is created in a feature-engineering cell not included in this record.
plt.figure(figsize=(12, 7))
sns.histplot(data=train_df, x='text_length', hue='target', kde=True, element='step')
plt.title('Distribution of Tweet Text Lengths')
plt.show()
code
129030866/cell_20
[ "image_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape

train_df['target'] = np.float64(train_df.target)
train_df['keyword'].value_counts().head(10).sort_values().plot(kind='barh')
plt.title('Top 10 Keywords')
plt.show()
code
129030866/cell_76
[ "text_html_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
test_df = pd.read_csv(data_path + 'test.csv')
test_df.shape

train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)


def extract_text(pattern, tweet_text):
    """Extract text from a string using a regex pattern."""
    pat_match = re.findall(pattern, tweet_text)
    return pat_match


def count_pattern(pattern, tweet_text):
    """Count the number of times a pattern occurs in a string."""
    pat_match = re.findall(pattern, tweet_text)
    return len(pat_match)


train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
# hashtag_count / mentions_count / url_count come from a feature-engineering
# cell not included in this record.
train_df.sort_values(by=['hashtag_count'], ascending=False).head(5)
train_df.sort_values(by=['mentions_count'], ascending=False).head(5)
train_df.sort_values(by=['url_count'], ascending=False).head(5)


def replace_url_in_text(df: pd.DataFrame, col_name: str) -> pd.DataFrame:
    """
    Use a regular expression to match URLs in the text of Twitter posts,
    which start with 'http' or 'https' and contain various allowed characters.
    Replace the URLs with the special token [URL].
    """
    pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    df[col_name] = df[col_name].apply(lambda x: re.sub(pattern, '[URL]', x))
    return df


train_df = replace_url_in_text(train_df, 'text')
train_df.sort_values(by='url_count', ascending=False).head()


def replace_mention_in_text(df: pd.DataFrame, col_name: str) -> pd.DataFrame:
    """
    Use a regular expression to match mentions in the text of Twitter posts,
    which start with '@' followed by a username containing alphanumeric
    characters and underscores. Replace the mention with the special token [MT].
    """
    pattern = '@(\\w+)'
    df[col_name] = df[col_name].apply(lambda x: re.sub(pattern, '[MT]', x))
    return df


train_df = replace_mention_in_text(train_df, 'text')
train_df.sort_values(by='mentions_count', ascending=False).head()
code
129030866/cell_40
[ "text_plain_output_1.png" ]
import os
import random
import re
import unicodedata
import gc

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
sns.set_style('whitegrid')

data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape

train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)

train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
print('count of tweets with duplicate text:', train_df.duplicated(['text']).sum())
code
129030866/cell_39
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
train_df.drop_duplicates(['text', 'target'], inplace=True)
print('remaining tweets with duplicate text and different target:', train_df.duplicated(['text']).sum())
code
129030866/cell_65
[ "text_plain_output_1.png", "image_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
model_name = 'microsoft/deberta-v3-base'
tokenizer = AutoTokenizer.from_pretrained(model_name)
new_token_list = ['[URL]', '[MT]', '[HT]', '[MV]']
tokenizer.add_special_tokens({'additional_special_tokens': new_token_list})
code
129030866/cell_72
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import re
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
test_df = pd.read_csv(data_path + 'test.csv')
test_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
def extract_text(pattern, tweet_text):
    """Extract text from a string using a regex pattern"""
    pat_match = re.findall(pattern, tweet_text)
    return pat_match
def count_pattern(pattern, tweet_text):
    """Count the number of times a pattern occurs in a string"""
    pat_match = re.findall(pattern, tweet_text)
    return len(pat_match)
train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
train_df.sort_values(by=['hashtag_count'], ascending=False).head(5)
train_df.sort_values(by=['mentions_count'], ascending=False).head(5)
train_df.sort_values(by=['url_count'], ascending=False).head(5)
def replace_url_in_text(df: pd.DataFrame, col_name: str) -> pd.DataFrame:
    """
    Use a regular expression to match URLs in the text of Twitter posts,
    which start with 'http' or 'https' and contain various allowed characters.
    Replace the URLs with the special token [URL].
    """
    pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    df[col_name] = df[col_name].apply(lambda x: re.sub(pattern, '[URL]', x))
    return df
train_df = replace_url_in_text(train_df, 'text')
train_df.sort_values(by='url_count', ascending=False).head()
code
129030866/cell_19
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
print(f"Count of records with missing keyword entry: {train_df['keyword'].isnull().sum()}")
code
129030866/cell_7
[ "text_plain_output_1.png" ]
import os
import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129030866/cell_45
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
train_df['keyword'].value_counts().head(10).sort_values().plot(kind='barh')
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
# plot the top 10 and bottom 10 keywords by disaster_ratio
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 10), sharex=True)
keyword_target.head(10)[[0, 1]].plot(kind='barh', ax=axes[0])
keyword_target.tail(10)[[0, 1]].plot(kind='barh', ax=axes[1])
axes[0].set_title('Top 10 keywords by disaster_ratio')
axes[1].set_title('Bottom 10 keywords by disaster_ratio')
plt.show()
train_df['location'].value_counts().head(10).sort_values().plot(kind='barh')
train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
sns.boxplot(x='target', y='text_length', data=train_df)
plt.title('Distribution of Tweet Text Lengths by Target')
plt.show()
code
129030866/cell_49
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
train_df.sort_values(by=['hashtag_count'], ascending=False).head(5)
code
129030866/cell_62
[ "text_html_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
model_name = 'microsoft/deberta-v3-base'
tokenizer = AutoTokenizer.from_pretrained(model_name)
code
129030866/cell_59
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
train_df['keyword'].value_counts().head(10).sort_values().plot(kind='barh')
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
# plot the top 10 and bottom 10 keywords by disaster_ratio
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 10), sharex=True)
keyword_target.head(10)[[0, 1]].plot(kind='barh', ax=axes[0])
keyword_target.tail(10)[[0, 1]].plot(kind='barh', ax=axes[1])
axes[0].set_title('Top 10 keywords by disaster_ratio')
axes[1].set_title('Bottom 10 keywords by disaster_ratio')
plt.show()
train_df['location'].value_counts().head(10).sort_values().plot(kind='barh')
train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
train_df.sort_values(by=['hashtag_count'], ascending=False).head(5)
train_df.sort_values(by=['mentions_count'], ascending=False).head(5)
train_df.sort_values(by=['url_count'], ascending=False).head(5)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(18, 5))
sns.boxplot(x='target', y='hashtag_count', data=train_df, ax=axes[0])
sns.boxplot(x='target', y='mentions_count', data=train_df, ax=axes[1])
sns.boxplot(x='target', y='url_count', data=train_df, ax=axes[2])
axes[0].set_title('Hashtag Count Distribution by Target')
axes[1].set_title('Mentions Count Distribution by Target')
axes[2].set_title('URL Count Distribution by Target')
code
129030866/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
train_df['keyword'].value_counts().head(10).sort_values().plot(kind='barh')
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
# plot the top 10 and bottom 10 keywords by disaster_ratio
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 10), sharex=True)
keyword_target.head(10)[[0, 1]].plot(kind='barh', ax=axes[0])
keyword_target.tail(10)[[0, 1]].plot(kind='barh', ax=axes[1])
axes[0].set_title('Top 10 keywords by disaster_ratio')
axes[1].set_title('Bottom 10 keywords by disaster_ratio')
plt.show()
train_df['location'].value_counts().head(10).sort_values().plot(kind='barh')
plt.title('Top 10 Locations')
plt.show()
code
129030866/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
sns.countplot(data=train_df, x='target')
plt.title('Target Variable Distribution')
plt.show()
code
129030866/cell_66
[ "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
model_name = 'microsoft/deberta-v3-base'
tokenizer = AutoTokenizer.from_pretrained(model_name)
new_token_list = ['[URL]', '[MT]', '[HT]', '[MV]']
tokenizer.add_special_tokens({'additional_special_tokens': new_token_list})
tokenizer.all_special_tokens
code
129030866/cell_43
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
train_df['text_length'].describe(percentiles=[0, 0.25, 0.5, 0.75, 0.9, 0.99]).round(0)
code
129030866/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df.head()
code
129030866/cell_22
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
keyword_target.head()
code
129030866/cell_53
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
train_df.drop_duplicates(['text', 'target'], inplace=True)
train_df.drop_duplicates(['text'], inplace=True)
train_df.sort_values(by=['hashtag_count'], ascending=False).head(5)
train_df.sort_values(by=['mentions_count'], ascending=False).head(5)
code
129030866/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
test_df = pd.read_csv(data_path + 'test.csv')
test_df.shape
code
129030866/cell_27
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
print(f"Count of records with missing location entry: {train_df['location'].isnull().sum()}")
code
129030866/cell_37
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
import os
import random
import re
import unicodedata
import gc
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
data_path = '/kaggle/input/nlp-getting-started/'
train_df = pd.read_csv(data_path + 'train.csv')
train_df.shape
train_df['target'] = np.float64(train_df.target)
keyword_target = train_df.groupby(['keyword', 'target']).size().unstack(fill_value=0)
keyword_target['disaster_ratio'] = keyword_target[1] / (keyword_target[0] + keyword_target[1])
keyword_target = keyword_target.sort_values(by='disaster_ratio', ascending=False)
print('count of tweets with duplicate text:', train_df.duplicated(['text']).sum())
print('count of tweets with duplicate text and target:', train_df.duplicated(['text', 'target']).sum())
code
32072718/cell_9
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.head()
code
32072718/cell_34
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
code
32072718/cell_40
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
for feature in year_feature:
    if feature != 'YrSold':
        data = dataset.copy()
        data[feature] = data['YrSold'] - data[feature]
discrete_feature = [feature for feature in numerical_features if len(dataset[feature].unique()) < 25 and feature not in year_feature + ['Id']]
dataset[discrete_feature].head()
code
32072718/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
for feature in year_feature:
    if feature != 'YrSold':
        data = dataset.copy()
        data[feature] = data['YrSold'] - data[feature]
discrete_feature = [feature for feature in numerical_features if len(dataset[feature].unique()) < 25 and feature not in year_feature + ['Id']]
discrete_feature
code
32072718/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
print('Number of numerical variables: ', len(numerical_features))
dataset[numerical_features].head()
code
32072718/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
print(dataset.shape)
code
32072718/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
    data.groupby(feature)['SalePrice'].median().plot.bar()
    plt.title(feature)
    plt.show()
code
32072718/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
type(dataset)
code
32072718/cell_16
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    print(feature, np.round(dataset[feature].isnull().mean(), 4), ' % missing values')
code
32072718/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
for feature in year_feature:
    if feature != 'YrSold':
        data = dataset.copy()
        data[feature] = data['YrSold'] - data[feature]
discrete_feature = [feature for feature in numerical_features if len(dataset[feature].unique()) < 25 and feature not in year_feature + ['Id']]
print('Discrete Variables Count: {}'.format(len(discrete_feature)))
code
32072718/cell_35
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
for feature in year_feature:
    print(feature, np.sort(dataset[feature].unique()))
    print('\n')
code
32072718/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
print('Id of Houses {}'.format(len(dataset.Id)))
code
32072718/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.tail()
code
32072718/cell_37
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
year_feature = [feature for feature in numerical_features if 'Yr' in feature or 'Year' in feature]
year_feature
for feature in year_feature:
    if feature != 'YrSold':
        data = dataset.copy()
        data[feature] = data['YrSold'] - data[feature]
        plt.scatter(data[feature], data['SalePrice'])
        plt.xlabel(feature)
        plt.ylabel('SalePrice')
        plt.show()
code
32072718/cell_36
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dataset.isnull().sum()
features_with_na = []
for features in dataset.columns:
    if dataset[features].isnull().sum() > 1:
        features_with_na.append(features)
features_with_na
features_with_na = [features for features in dataset.columns if dataset[features].isnull().sum() > 1]
for feature in features_with_na:
    data = dataset.copy()
    data[feature] = np.where(data[feature].isnull(), 1, 0)
numerical_features = [feature for feature in dataset.columns if dataset[feature].dtypes != 'O']
dataset.groupby('YrSold')['SalePrice'].median().plot()
plt.xlabel('Year Sold')
plt.ylabel('Median House Price')
plt.title('House Price vs YearSold')
code