Columns:
path: string, lengths 13 to 17
screenshot_names: sequence, lengths 1 to 873
code: string, lengths 0 to 40.4k
cell_type: string, 1 class ("code")
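The records that follow share these four columns: "path" appears to encode "<kernel_id>/cell_<n>", "screenshot_names" lists the rendered outputs of the cell, "code" holds the cell source, and "cell_type" is always "code". As a minimal sketch of loading and grouping such a dump with pandas (assuming the records are exported to a Parquet file; the file name cells.parquet is hypothetical):

import pandas as pd

# Hypothetical export of the records listed below; adjust the file name/format to the actual dump.
cells = pd.read_parquet('cells.parquet')  # columns: path, screenshot_names, code, cell_type

# 'path' looks like '<kernel_id>/cell_<n>', so the notebook id is the first path component.
cells['kernel_id'] = cells['path'].str.split('/').str[0]
print(cells.groupby('kernel_id').size())    # number of cells per notebook
print(cells['code'].str.len().describe())   # code length distribution (0 to ~40.4k characters)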
48165025/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.isnull().sum()
sns.set(font_scale=1.4)
plt.figure(figsize=(10, 6))
sns.set_style('whitegrid')
sns.countplot(x='Survived', data=train, palette='RdBu_r')
plt.title('Survived/not survived')
plt.show()
code
48165025/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.isnull().sum()
plt.figure(figsize=(10, 6))
sns.heatmap(train.isnull(), cbar=False, yticklabels=False, cmap='coolwarm')
sns.set(font_scale=1.4)
plt.title('Missing data features', fontsize=20)
plt.show()
code
48165025/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
titanic_dict = {'survived': 'survived', 'Pclass': 'Ticket class', 'sex': 'Sex', 'Age': 'Age in years', 'Sibsp': '# of siblings / spouses aboard the Titanic', 'parch': '# of parents / children aboard the Titanic', 'ticket': 'Ticket number', 'Fare': 'Passenger fare', 'cabin': 'Cabin number', 'Embarked': 'Port of Embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)'}
train.isnull().sum()
sns.set(font_scale=1.4)
sns.set_style('whitegrid')
sns.set_style('whitegrid')
plt.figure(figsize=(10, 6))
sns.distplot(train['Age'].dropna(), kde=False, color='darkred', bins=30)
plt.title('Histogram of passengers age')
plt.show()
code
48165025/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import cufflinks as cf
cf.go_offline()
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, roc_curve, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.callbacks import EarlyStopping
import warnings
code
48165025/cell_7
[ "image_output_1.png" ]
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.info()
code
48165025/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.isnull().sum()
code
48165025/cell_8
[ "image_output_1.png" ]
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.head()
code
48165025/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
test.head()
code
48165025/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
test.describe()
code
48165025/cell_31
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
titanic_dict = {'survived': 'survived', 'Pclass': 'Ticket class', 'sex': 'Sex', 'Age': 'Age in years', 'Sibsp': '# of siblings / spouses aboard the Titanic', 'parch': '# of parents / children aboard the Titanic', 'ticket': 'Ticket number', 'Fare': 'Passenger fare', 'cabin': 'Cabin number', 'Embarked': 'Port of Embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)'}
train.isnull().sum()
sns.set(font_scale=1.4)
sns.set_style('whitegrid')
sns.set_style('whitegrid')
plt.figure(figsize=(10, 6))
sns.distplot(train['Age'].dropna(), kde=False, color='darkred', bins=10)
plt.title('Distribution of passengers age')
plt.show()
code
48165025/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
titanic_dict = {'survived': 'survived', 'Pclass': 'Ticket class', 'sex': 'Sex', 'Age': 'Age in years', 'Sibsp': '# of siblings / spouses aboard the Titanic', 'parch': '# of parents / children aboard the Titanic', 'ticket': 'Ticket number', 'Fare': 'Passenger fare', 'cabin': 'Cabin number', 'Embarked': 'Port of Embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)'}
train.isnull().sum()
sns.set(font_scale=1.4)
sns.set_style('whitegrid')
sns.set_style('whitegrid')
plt.figure(figsize=(10, 6))
sns.countplot(x='Survived', data=train, hue='Pclass')
plt.legend(title=titanic_dict['Pclass'])
plt.show()
code
128030873/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
dataset.info(verbose=True)
code
128030873/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
dataset.describe().T
code
128030873/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import seaborn as sns
import warnings
from mlxtend.plotting import plot_decision_regions
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings('ignore')
code
128030873/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128030873/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
dataset.describe().T
dataset_copy = dataset.copy(deep=True)
dataset_copy[['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']] = dataset_copy[['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']].replace(0, np.NaN)
print(dataset_copy.isnull().sum())
code
128030873/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
dataset.head()
code
128030873/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('/kaggle/input/pima-indians-diabetes-database/diabetes.csv')
dataset.describe()
code
128047807/cell_2
[ "text_plain_output_1.png" ]
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision.transforms.transforms import Resize
import timm
code
128047807/cell_1
[ "text_plain_output_1.png" ]
!pip install torch torchvision
!pip install timm
!pip install scikit-learn
code
128047807/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
test_labels = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/test.csv')
test_image_ids = test_labels['image_id'].values
train_labels = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/train.csv')
train_labels = train_labels[['healthy', 'multiple_diseases', 'rust', 'scab']]
train_labels = train_labels.values
train_labels
code
128047807/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import os
import pandas as pd
image_dir = '/kaggle/input/plant-pathology-2020-fgvc7/images'
image_files = os.listdir(image_dir)
image_paths = [os.path.join(image_dir, filename) for filename in image_files]
import pandas as pd
test_labels = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/test.csv')
test_image_ids = test_labels['image_id'].values
train_labels = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/train.csv')
train_labels = train_labels[['healthy', 'multiple_diseases', 'rust', 'scab']]
train_labels = train_labels.values
from sklearn.model_selection import train_test_split
train_image_paths = [path for path in image_paths if 'Train' in path]
test_image_paths = [path for path in image_paths if 'Test' in path]
train_image_paths, valid_image_paths, train_labels, valid_labels = train_test_split(train_image_paths, train_labels, test_size=0.2, random_state=123)
print('Train Samples:', len(train_image_paths))
print('Validation Samples:', len(valid_image_paths))
print('Test Samples:', len(test_image_paths))
code
128047807/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from torch.utils.data import Dataset, DataLoader
import os
import pandas as pd
import timm
import timm
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
torch.manual_seed(123)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image_dir = '/kaggle/input/plant-pathology-2020-fgvc7/images'
image_files = os.listdir(image_dir)
image_paths = [os.path.join(image_dir, filename) for filename in image_files]
import pandas as pd
test_labels = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/test.csv')
test_image_ids = test_labels['image_id'].values
train_labels = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/train.csv')
train_labels = train_labels[['healthy', 'multiple_diseases', 'rust', 'scab']]
train_labels = train_labels.values
from sklearn.model_selection import train_test_split
train_image_paths = [path for path in image_paths if 'Train' in path]
test_image_paths = [path for path in image_paths if 'Test' in path]
train_image_paths, valid_image_paths, train_labels, valid_labels = train_test_split(train_image_paths, train_labels, test_size=0.2, random_state=123)
from torch.utils.data import Dataset

class CustomDataset(Dataset):

    def __init__(self, image_paths, labels=None, transform=None):
        self.image_paths = image_paths
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, index):
        image_path = self.image_paths[index]
        image = Image.open(image_path).convert('RGB')
        if self.transform:
            image = self.transform(image)
        if self.labels is not None:
            label = self.labels[index]
            return (image, label)
        else:
            return image

transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_dataset = CustomDataset(train_image_paths, train_labels, transform=transform)
valid_dataset = CustomDataset(valid_image_paths, valid_labels, transform=transform)
test_dataset = CustomDataset([os.path.join(image_dir, f'{image_id}.jpg') for image_id in test_image_ids], transform=transform)
batch_size = 16
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
import timm
model = timm.create_model('vit_base_patch16_224', pretrained=True)
num_classes = 4
model.head = nn.Linear(model.head.in_features, num_classes)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)
num_epochs = 50
for epoch in range(num_epochs):
    model.train()
    total_train_loss = 0.0
    total_train_correct = 0
    total_train_samples = 0
    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total_train_correct += (predicted == labels.argmax(dim=1)).sum().item()
        total_train_samples += labels.size(0)
        labels_float = labels.float()
        loss = criterion(outputs, labels_float)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        total_train_loss += loss.item()
    train_accuracy = total_train_correct / total_train_samples
    train_loss = total_train_loss / len(train_loader)
    model.eval()
    total_valid_loss = 0.0
    total_valid_correct = 0
    total_valid_samples = 0
    with torch.no_grad():
        for images, labels in valid_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total_valid_correct += (predicted == labels.argmax(dim=1)).sum().item()
            total_valid_samples += labels.size(0)
            labels_float = labels.float()
            loss = criterion(outputs, labels_float)
            total_valid_loss += loss.item()
    valid_accuracy = total_valid_correct / total_valid_samples
    valid_loss = total_valid_loss / len(valid_loader)
    scheduler.step(valid_loss)
    print(f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.4f}')
    print(f'Epoch [{epoch + 1}/{num_epochs}], Valid Loss: {valid_loss:.4f}, Valid Accuracy: {valid_accuracy:.4f}')
code
74041348/cell_13
[ "text_plain_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
alive2 = ps.concat([alive, category_dummies, gender_dummies], axis=1)
alive2.head(10)
code
74041348/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners.head(10)
code
74041348/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
code
74041348/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.head(10)
code
74041348/cell_11
[ "text_html_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
print(winners1.dtypes)
code
74041348/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74041348/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
code
74041348/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as ps
import seaborn as sn
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
alive2 = ps.concat([alive, category_dummies, gender_dummies], axis=1)
CountryVsCat = alive2.groupby('bornCountryCode')['peace'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['chemistry'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['literature'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['economics'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['medicine'].sum()
plt.figure(figsize=(28, 11))
sn.barplot(x=CountryVsCat.index, y=CountryVsCat.values, palette='rainbow').set_title('medicine - Counts')
code
74041348/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as ps
import seaborn as sn
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
alive2 = ps.concat([alive, category_dummies, gender_dummies], axis=1)
CountryVsCat = alive2.groupby('bornCountryCode')['peace'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['chemistry'].sum()
plt.figure(figsize=(28, 11))
sn.barplot(x=CountryVsCat.index, y=CountryVsCat.values, palette='rainbow').set_title('Chemistry - Counts')
code
74041348/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as ps
import seaborn as sn
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
alive2 = ps.concat([alive, category_dummies, gender_dummies], axis=1)
CountryVsCat = alive2.groupby('bornCountryCode')['peace'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['chemistry'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['literature'].sum()
plt.figure(figsize=(28, 11))
sn.barplot(x=CountryVsCat.index, y=CountryVsCat.values, palette='rainbow').set_title('literature - Counts')
code
74041348/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.describe()
code
74041348/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as ps
import seaborn as sn
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
alive2 = ps.concat([alive, category_dummies, gender_dummies], axis=1)
CountryVsCat = alive2.groupby('bornCountryCode')['peace'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['chemistry'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['literature'].sum()
CountryVsCat = alive2.groupby('bornCountryCode')['economics'].sum()
plt.figure(figsize=(28, 11))
sn.barplot(x=CountryVsCat.index, y=CountryVsCat.values, palette='rainbow').set_title('economics - Counts')
code
74041348/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as ps
import seaborn as sn
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
alive2 = ps.concat([alive, category_dummies, gender_dummies], axis=1)
CountryVsCat = alive2.groupby('bornCountryCode')['peace'].sum()
plt.figure(figsize=(28, 11))
sn.barplot(x=CountryVsCat.index, y=CountryVsCat.values, palette='rainbow')
code
74041348/cell_10
[ "text_html_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
code
74041348/cell_12
[ "text_plain_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
winners_raw.isnull().sum()
winners = winners_raw.drop(columns=['id', 'died', 'bornCity', 'bornCountry', 'diedCountry', 'diedCountryCode', 'diedCity', 'motivation', 'overallMotivation', 'name', 'city', 'country'], axis=1)
winners1 = winners.dropna(subset=['year', 'share', 'category'])
winners1.shape
dtype_converter = {'firstname': str, 'surname': str, 'share': int, 'year': int, 'gender': str}
winners1 = winners1.astype(dtype_converter)
alive = winners1.loc[winners_raw['died'] == '0000-00-00']
category_dummies = ps.get_dummies(alive['category'])
category_dummies
gender_dummies = ps.get_dummies(alive['gender'])
gender_dummies
code
74041348/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as ps
import pandas as ps
import matplotlib.pyplot as plt
import seaborn as sn
from pandas_profiling import ProfileReport
winners_raw = ps.read_csv('/kaggle/input/nobel-prize-winners-19002020/nobel_prize_by_winner.csv')
winners_raw.dtypes
winners_raw.shape
code
17132106/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
dataset.hist(edgecolor='black', linewidth=2)
code
17132106/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
plt.figure(figsize=(20, 8))
dataset.plot(kind='box', sharex=False, sharey=False)
code
17132106/cell_9
[ "image_output_1.png" ]
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
code
17132106/cell_4
[ "image_output_1.png" ]
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset.head(5)
code
17132106/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
sns.pairplot(dataset, hue='Species', markers=['o', 's', 'D'], diag_kind='hist')
sns.despine()
plt.show()
code
17132106/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.head(5)
code
17132106/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
plt.figure(figsize=(12, 6))
plt.title('Compare the distribution of Petal Width')
sns.boxplot(x='Species', y='PetalWidthCm', data=dataset)
plt.show()
code
17132106/cell_8
[ "image_output_1.png" ]
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.info()
code
17132106/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
sns.set(style='darkgrid', palette='deep')
plt.figure(figsize=(12, 6))
plt.title('Compare the distribution of Sepal Length')
sns.boxplot(x='Species', y='SepalLengthCm', data=dataset)
plt.show()
code
17132106/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
plt.figure(figsize=(12, 6))
plt.title('Compare the distribution of Sepal Width')
sns.boxplot(x='Species', y='SepalWidthCm', data=dataset)
plt.show()
code
17132106/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
sns.set(style='darkgrid', palette='deep')
plt.figure(figsize=(12, 6))
plt.title('Compare the distribution of Petal Length')
sns.boxplot(x='Species', y='PetalLengthCm', data=dataset)
plt.show()
code
17132106/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
dataset.boxplot(by='Species', figsize=(12, 8))
code
17132106/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
data = '../input/Iris.csv'
dataset = pd.read_csv(data)
dataset = dataset.drop('Id', axis=1)
dataset.groupby('Species').size()
dataset.describe()
code
73075982/cell_21
[ "text_plain_output_1.png" ]
import tensorflow as tf
print('Num GPUs Available: ', len(tf.config.list_physical_devices('GPU')))
code
73075982/cell_26
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from sklearn.decomposition import TruncatedSVD from sklearn.metrics import confusion_matrix,jaccard_score,f1_score,recall_score from sklearn.metrics import precision_score,roc_auc_score,log_loss from sklearn.model_selection import train_test_split,GridSearchCV from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from wordcloud import WordCloud, STOPWORDS import datetime import numpy as np import pandas as pd import sklearn.calibration as cal import sklearn.feature_extraction.text as ft import sklearn.feature_selection as fs import sklearn.pipeline as pipe import time import warnings import pandas as pd import sklearn.feature_extraction.text as ft import sklearn.feature_selection as fs from sklearn.model_selection import train_test_split, GridSearchCV import sklearn.svm as svm import sklearn.calibration as cal import matplotlib.pyplot as plt from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, brier_score_loss from sklearn.metrics import plot_precision_recall_curve, classification_report from sklearn.metrics import precision_score, roc_auc_score, log_loss from sklearn.metrics import confusion_matrix, jaccard_score, f1_score, recall_score from sklearn.preprocessing import FunctionTransformer import seaborn as sns import numpy as np import time import datetime import sklearn.pipeline as pipe from lime import lime_text from nltk.corpus import stopwords from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.decomposition import TruncatedSVD import re import tensorflow as tf from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM from tensorflow.keras.utils import to_categorical from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from wordcloud import WordCloud, STOPWORDS stop_words = set(stopwords.words('english')) stopwords = set(STOPWORDS) import warnings warnings.filterwarnings('ignore', 'This pattern has match groups') df = pd.read_csv('../input/d/brandonbenton/botezlive-chat-classification/botezlive_data.csv') def split_data(df, params): df_ones = df.loc[df['is_offensive'] == 1] df_tmp = df.loc[df['is_offensive'] == 0] ratio = len(df_ones) / len(df_tmp) msk = np.random.rand(len(df_tmp)) < params['multiplier'] * ratio df_zeros = df_tmp.loc[msk] df = pd.concat([df_ones, df_zeros]).reset_index(drop=True) df_train, df_test = train_test_split(df, test_size=0.2, random_state=0) return (df_train, df_test) def get_vectorizer(df_train, params): vparams = dict({'stop_words': stop_words, 'min_df': params['min_df'], 'max_df': params['max_df'], 'smooth_idf': params['smooth_idf'], 'analyzer': params['analyzer'], 'ngram_range': params['ngram_range'], 'sublinear_tf': params['sublinear_tf'], 'max_features': params['max_features']}) vectorizer = ft.TfidfVectorizer(**vparams) x_train = vectorizer.fit_transform(df_train['text']) if params['trim features']: y = df_train['is_offensive'] x_names = vectorizer.get_feature_names() p_value_limit = params['pvalue limit'] dtf_features = pd.DataFrame() for cat in np.unique(y): chi2, p_value = fs.chi2(x_train, y == cat) entry = pd.DataFrame({'feature': x_names, 'score': 1 - p_value, 'y': cat}) dtf_features = dtf_features.append(entry) dtf_features = dtf_features.sort_values(['y', 'score'], ascending=[True, False]) dtf_features = dtf_features[dtf_features['score'] > p_value_limit] x_names 
= dtf_features['feature'].unique().tolist() vparams['vocabulary'] = x_names vectorizer = ft.TfidfVectorizer(**vparams) return vectorizer def get_dual_vectorizer(df_train, p1, p2): vec1 = get_vectorizer(df_train, p1) vec2 = get_vectorizer(df_train, p2) features = set(vec1.get_feature_names()) | set(vec2.get_feature_names()) features = list(features) vectorizer = ft.TfidfVectorizer(vocabulary=features, max_features=p1['max_features']) return vectorizer def get_pca(params): pca = TruncatedSVD(n_components=params['n_components']) return pca def transformer(x): return np.array(x.toarray()) def param_score(df, params): df_train, df_test = split_data(df, params) vectorizer = get_vectorizer(df_train, params) model = get_model(params) y_train = df_train['is_offensive'] y_test = df_test['is_offensive'] num_splits = 1 if params['classifier_type'] == 'NN': nn = KerasClassifier(build_fn=lambda: model) nn._estimator_type = 'classifier' vectorizer.fit(df_train['text']) num_rows = df_train.shape[0] num_splits = 1 if params['pca']: pca = get_pca(params) pca.fit(vectorizer.transform(df_train['text'])) if num_splits == 1 and params['pca']: my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)]) my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4) elif num_splits == 1 and (not params['pca']): my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)]) my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4) elif num_splits > 1 and params['pca']: pca.fit(vectorizer.transform(df_train['text'])) df_array = np.array_split(df_train, num_splits) for i in range(num_splits): df_tmp = df_array[i] nn.fit(pca.transform(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4) my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)]) elif num_splits > 1 and (not params['pca']): df_array = np.array_split(df_train, num_splits) for i in range(num_splits): df_tmp = df_array[i] nn.fit(transformer(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4) my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)]) else: clf = cal.CalibratedClassifierCV(model, cv=p1['cv'], method='sigmoid') if params['pca']: pca = get_pca(params) my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', clf)]) else: my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('classifier', clf)]) my_pipeline.fit(df_train['text'], df_train['is_offensive']) score = my_pipeline.score(df_test['text'], df_test['is_offensive']) discrete_preds = my_pipeline.predict(df_test['text']) confusion = confusion_matrix(y_test, discrete_preds) test_ones = sum(confusion[1][:]) test_zeros = sum(confusion[0][:]) scores = dict({'classifier_type': params['classifier_type'], 'splits': num_splits, 'n_samples': df_train.shape[0], 'ngram_range': params['ngram_range'], 'pca': params['pca'], 'n_components': params['n_components'], 'max_features': params['max_features'], 'trim features': params['trim features'], 'pvalue limit': params['pvalue limit'], 'multiplier': params['multiplier'], 'precision': precision_score(y_test, discrete_preds), 'jaccard': 
jaccard_score(y_test, discrete_preds), 'recall': recall_score(y_test, discrete_preds), 'F1': f1_score(y_test, discrete_preds), 'TP': confusion[1][1] / test_ones, 'FP': confusion[0][1] / test_zeros, 'TN': confusion[0][0] / test_zeros, 'FN': confusion[1][0] / test_ones}) return (my_pipeline, discrete_preds, df_train, df_test, scores) headers = ['classifier_type', 'splits', 'n_samples', 'ngram_range', 'trim features', 'pca', 'n_components', 'max_features', 'pvalue limit', 'multiplier', 'precision', 'jaccard', 'recall', 'F1', 'TP', 'FP', 'TN', 'FN'] df_scores = pd.DataFrame(columns=headers) p1 = dict({'classifier_type': 'SVM', 'min_df': 1, 'max_df': 0.9, 'smooth_idf': 1, 'sublinear_tf': 1, 'ngram_range': (2, 10), 'max_features': None, 'C': 1.0, 'cv': 5, 'analyzer': 'char_wb', 'trim features': False, 'pvalue limit': 0.4, 'class_weight': None, 'multiplier': 1, 'n_components': 400, 'epochs': 10, 'pca': False}) p2 = p1.copy() p2['classifier_type'] = 'NN' p2['max_features'] = 20000 p2['pca'] = True params_list = [p1] for i, p in enumerate(params_list): start = time.time() model, discrete_preds, df_train, df_test, scores = param_score(df, p) df_scores = df_scores.append(scores, ignore_index=True) end = time.time() elapsed = end - start remaining_seconds = elapsed * (len(params_list) - i - 1) df_scores.sort_values(by='avg score', ascending=False, inplace=True) df_scores.head(1)
code
73075982/cell_32
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from lime import lime_text from nltk.corpus import stopwords from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix,jaccard_score,f1_score,recall_score from sklearn.metrics import plot_confusion_matrix,plot_roc_curve,brier_score_loss from sklearn.metrics import plot_precision_recall_curve,classification_report from sklearn.metrics import precision_score,roc_auc_score,log_loss from sklearn.model_selection import train_test_split,GridSearchCV from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM from tensorflow.keras.models import Sequential, Model from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.calibration as cal import sklearn.feature_extraction.text as ft import sklearn.feature_selection as fs import sklearn.pipeline as pipe import sklearn.svm as svm import warnings import pandas as pd import sklearn.feature_extraction.text as ft import sklearn.feature_selection as fs from sklearn.model_selection import train_test_split, GridSearchCV import sklearn.svm as svm import sklearn.calibration as cal import matplotlib.pyplot as plt from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, brier_score_loss from sklearn.metrics import plot_precision_recall_curve, classification_report from sklearn.metrics import precision_score, roc_auc_score, log_loss from sklearn.metrics import confusion_matrix, jaccard_score, f1_score, recall_score from sklearn.preprocessing import FunctionTransformer import seaborn as sns import numpy as np import time import datetime import sklearn.pipeline as pipe from lime import lime_text from nltk.corpus import stopwords from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.decomposition import TruncatedSVD import re import tensorflow as tf from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM from tensorflow.keras.utils import to_categorical from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from wordcloud import WordCloud, STOPWORDS stop_words = set(stopwords.words('english')) stopwords = set(STOPWORDS) import warnings warnings.filterwarnings('ignore', 'This pattern has match groups') df = pd.read_csv('../input/d/brandonbenton/botezlive-chat-classification/botezlive_data.csv') def split_data(df, params): df_ones = df.loc[df['is_offensive'] == 1] df_tmp = df.loc[df['is_offensive'] == 0] ratio = len(df_ones) / len(df_tmp) msk = np.random.rand(len(df_tmp)) < params['multiplier'] * ratio df_zeros = df_tmp.loc[msk] df = pd.concat([df_ones, df_zeros]).reset_index(drop=True) df_train, df_test = train_test_split(df, test_size=0.2, random_state=0) return (df_train, df_test) def get_vectorizer(df_train, params): vparams = dict({'stop_words': stop_words, 'min_df': params['min_df'], 'max_df': params['max_df'], 'smooth_idf': params['smooth_idf'], 'analyzer': params['analyzer'], 'ngram_range': params['ngram_range'], 'sublinear_tf': params['sublinear_tf'], 'max_features': params['max_features']}) vectorizer = ft.TfidfVectorizer(**vparams) x_train = vectorizer.fit_transform(df_train['text']) if params['trim features']: y = df_train['is_offensive'] x_names = 
vectorizer.get_feature_names() p_value_limit = params['pvalue limit'] dtf_features = pd.DataFrame() for cat in np.unique(y): chi2, p_value = fs.chi2(x_train, y == cat) entry = pd.DataFrame({'feature': x_names, 'score': 1 - p_value, 'y': cat}) dtf_features = dtf_features.append(entry) dtf_features = dtf_features.sort_values(['y', 'score'], ascending=[True, False]) dtf_features = dtf_features[dtf_features['score'] > p_value_limit] x_names = dtf_features['feature'].unique().tolist() vparams['vocabulary'] = x_names vectorizer = ft.TfidfVectorizer(**vparams) return vectorizer def get_dual_vectorizer(df_train, p1, p2): vec1 = get_vectorizer(df_train, p1) vec2 = get_vectorizer(df_train, p2) features = set(vec1.get_feature_names()) | set(vec2.get_feature_names()) features = list(features) vectorizer = ft.TfidfVectorizer(vocabulary=features, max_features=p1['max_features']) return vectorizer def get_pca(params): pca = TruncatedSVD(n_components=params['n_components']) return pca def transformer(x): return np.array(x.toarray()) def param_score(df, params): df_train, df_test = split_data(df, params) vectorizer = get_vectorizer(df_train, params) model = get_model(params) y_train = df_train['is_offensive'] y_test = df_test['is_offensive'] num_splits = 1 if params['classifier_type'] == 'NN': nn = KerasClassifier(build_fn=lambda: model) nn._estimator_type = 'classifier' vectorizer.fit(df_train['text']) num_rows = df_train.shape[0] num_splits = 1 if params['pca']: pca = get_pca(params) pca.fit(vectorizer.transform(df_train['text'])) if num_splits == 1 and params['pca']: my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)]) my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4) elif num_splits == 1 and (not params['pca']): my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)]) my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4) elif num_splits > 1 and params['pca']: pca.fit(vectorizer.transform(df_train['text'])) df_array = np.array_split(df_train, num_splits) for i in range(num_splits): df_tmp = df_array[i] nn.fit(pca.transform(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4) my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)]) elif num_splits > 1 and (not params['pca']): df_array = np.array_split(df_train, num_splits) for i in range(num_splits): df_tmp = df_array[i] nn.fit(transformer(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4) my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)]) else: clf = cal.CalibratedClassifierCV(model, cv=p1['cv'], method='sigmoid') if params['pca']: pca = get_pca(params) my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', clf)]) else: my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('classifier', clf)]) my_pipeline.fit(df_train['text'], df_train['is_offensive']) score = my_pipeline.score(df_test['text'], df_test['is_offensive']) discrete_preds = my_pipeline.predict(df_test['text']) confusion = confusion_matrix(y_test, discrete_preds) test_ones = sum(confusion[1][:]) test_zeros = sum(confusion[0][:]) 
scores = dict({'classifier_type': params['classifier_type'], 'splits': num_splits, 'n_samples': df_train.shape[0], 'ngram_range': params['ngram_range'], 'pca': params['pca'], 'n_components': params['n_components'], 'max_features': params['max_features'], 'trim features': params['trim features'], 'pvalue limit': params['pvalue limit'], 'multiplier': params['multiplier'], 'precision': precision_score(y_test, discrete_preds), 'jaccard': jaccard_score(y_test, discrete_preds), 'recall': recall_score(y_test, discrete_preds), 'F1': f1_score(y_test, discrete_preds), 'TP': confusion[1][1] / test_ones, 'FP': confusion[0][1] / test_zeros, 'TN': confusion[0][0] / test_zeros, 'FN': confusion[1][0] / test_ones}) return (my_pipeline, discrete_preds, df_train, df_test, scores) def get_model(params): if params['classifier_type'] == 'NN': if params['pca']: feature_num = params['n_components'] else: feature_num = params['max_features'] model = Sequential() model.add(Dense(128, activation='relu', input_shape=(feature_num,))) model.add(Dense(64, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() if params['classifier_type'] == 'SVM': cparams = dict({'max_iter': 1000, 'C': params['C'], 'class_weight': params['class_weight']}) model = svm.LinearSVC(**cparams) if params['classifier_type'] == 'RFC': model = RandomForestClassifier() return model txt_instance = 'its so hot out' explainer = lime_text.LimeTextExplainer(class_names=np.unique(df_train['is_offensive'])) explained = explainer.explain_instance(txt_instance, model.predict_proba, num_features=5) def plot_score_curves(clf, X, Y): ax = plt.gca() preds = clf.predict_proba(X)[:, 1] discrete_preds = clf.predict(X) clf_score = brier_score_loss(Y, preds, pos_label=1) frac_of_positives, mean_predicted_value = cal.calibration_curve(Y, preds, n_bins=20) clf_score = brier_score_loss(Y, discrete_preds, pos_label=1) frac_of_positives, mean_predicted_value = cal.calibration_curve(Y, discrete_preds, n_bins=20) plot_score_curves(model, df_train['text'], df_train['is_offensive'])
code
73075982/cell_28
[ "text_html_output_1.png" ]
from lime import lime_text from nltk.corpus import stopwords from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix,jaccard_score,f1_score,recall_score from sklearn.metrics import precision_score,roc_auc_score,log_loss from sklearn.model_selection import train_test_split,GridSearchCV from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM from tensorflow.keras.models import Sequential, Model from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from wordcloud import WordCloud, STOPWORDS import numpy as np import pandas as pd import sklearn.calibration as cal import sklearn.feature_extraction.text as ft import sklearn.feature_selection as fs import sklearn.pipeline as pipe import sklearn.svm as svm import warnings import pandas as pd import sklearn.feature_extraction.text as ft import sklearn.feature_selection as fs from sklearn.model_selection import train_test_split, GridSearchCV import sklearn.svm as svm import sklearn.calibration as cal import matplotlib.pyplot as plt from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, brier_score_loss from sklearn.metrics import plot_precision_recall_curve, classification_report from sklearn.metrics import precision_score, roc_auc_score, log_loss from sklearn.metrics import confusion_matrix, jaccard_score, f1_score, recall_score from sklearn.preprocessing import FunctionTransformer import seaborn as sns import numpy as np import time import datetime import sklearn.pipeline as pipe from lime import lime_text from nltk.corpus import stopwords from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.decomposition import TruncatedSVD import re import tensorflow as tf from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM from tensorflow.keras.utils import to_categorical from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from wordcloud import WordCloud, STOPWORDS stop_words = set(stopwords.words('english')) stopwords = set(STOPWORDS) import warnings warnings.filterwarnings('ignore', 'This pattern has match groups') df = pd.read_csv('../input/d/brandonbenton/botezlive-chat-classification/botezlive_data.csv') def split_data(df, params): df_ones = df.loc[df['is_offensive'] == 1] df_tmp = df.loc[df['is_offensive'] == 0] ratio = len(df_ones) / len(df_tmp) msk = np.random.rand(len(df_tmp)) < params['multiplier'] * ratio df_zeros = df_tmp.loc[msk] df = pd.concat([df_ones, df_zeros]).reset_index(drop=True) df_train, df_test = train_test_split(df, test_size=0.2, random_state=0) return (df_train, df_test) def get_vectorizer(df_train, params): vparams = dict({'stop_words': stop_words, 'min_df': params['min_df'], 'max_df': params['max_df'], 'smooth_idf': params['smooth_idf'], 'analyzer': params['analyzer'], 'ngram_range': params['ngram_range'], 'sublinear_tf': params['sublinear_tf'], 'max_features': params['max_features']}) vectorizer = ft.TfidfVectorizer(**vparams) x_train = vectorizer.fit_transform(df_train['text']) if params['trim features']: y = df_train['is_offensive'] x_names = vectorizer.get_feature_names() p_value_limit = params['pvalue limit'] dtf_features = pd.DataFrame() for cat in np.unique(y): chi2, p_value = fs.chi2(x_train, y == cat) entry = pd.DataFrame({'feature': x_names, 
                                     'score': 1 - p_value, 'y': cat})
            dtf_features = dtf_features.append(entry)
        dtf_features = dtf_features.sort_values(['y', 'score'], ascending=[True, False])
        dtf_features = dtf_features[dtf_features['score'] > p_value_limit]
        x_names = dtf_features['feature'].unique().tolist()
        vparams['vocabulary'] = x_names
        vectorizer = ft.TfidfVectorizer(**vparams)
    return vectorizer
def get_dual_vectorizer(df_train, p1, p2):
    vec1 = get_vectorizer(df_train, p1)
    vec2 = get_vectorizer(df_train, p2)
    features = set(vec1.get_feature_names()) | set(vec2.get_feature_names())
    features = list(features)
    vectorizer = ft.TfidfVectorizer(vocabulary=features, max_features=p1['max_features'])
    return vectorizer
def get_pca(params):
    pca = TruncatedSVD(n_components=params['n_components'])
    return pca
def transformer(x):
    return np.array(x.toarray())
def param_score(df, params):
    df_train, df_test = split_data(df, params)
    vectorizer = get_vectorizer(df_train, params)
    model = get_model(params)
    y_train = df_train['is_offensive']
    y_test = df_test['is_offensive']
    num_splits = 1
    if params['classifier_type'] == 'NN':
        nn = KerasClassifier(build_fn=lambda: model)
        nn._estimator_type = 'classifier'
        vectorizer.fit(df_train['text'])
        num_rows = df_train.shape[0]
        num_splits = 1
        if params['pca']:
            pca = get_pca(params)
            pca.fit(vectorizer.transform(df_train['text']))
        if num_splits == 1 and params['pca']:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits == 1 and (not params['pca']):
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits > 1 and params['pca']:
            pca.fit(vectorizer.transform(df_train['text']))
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(pca.transform(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
        elif num_splits > 1 and (not params['pca']):
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(transformer(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
    else:
        clf = cal.CalibratedClassifierCV(model, cv=p1['cv'], method='sigmoid')
        if params['pca']:
            pca = get_pca(params)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', clf)])
        else:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('classifier', clf)])
        my_pipeline.fit(df_train['text'], df_train['is_offensive'])
    score = my_pipeline.score(df_test['text'], df_test['is_offensive'])
    discrete_preds = my_pipeline.predict(df_test['text'])
    confusion = confusion_matrix(y_test, discrete_preds)
    test_ones = sum(confusion[1][:])
    test_zeros = sum(confusion[0][:])
    scores = dict({'classifier_type': params['classifier_type'], 'splits': num_splits, 'n_samples': df_train.shape[0], 'ngram_range': params['ngram_range'], 'pca': params['pca'], 'n_components': params['n_components'], 'max_features': params['max_features'], 'trim features': params['trim features'], 'pvalue limit': params['pvalue limit'], 'multiplier': params['multiplier'], 'precision': precision_score(y_test, discrete_preds), 'jaccard': jaccard_score(y_test, discrete_preds), 'recall': recall_score(y_test, discrete_preds), 'F1': f1_score(y_test, discrete_preds), 'TP': confusion[1][1] / test_ones, 'FP': confusion[0][1] / test_zeros, 'TN': confusion[0][0] / test_zeros, 'FN': confusion[1][0] / test_ones})
    return (my_pipeline, discrete_preds, df_train, df_test, scores)
def get_model(params):
    if params['classifier_type'] == 'NN':
        if params['pca']:
            feature_num = params['n_components']
        else:
            feature_num = params['max_features']
        model = Sequential()
        model.add(Dense(128, activation='relu', input_shape=(feature_num,)))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        model.summary()
    if params['classifier_type'] == 'SVM':
        cparams = dict({'max_iter': 1000, 'C': params['C'], 'class_weight': params['class_weight']})
        model = svm.LinearSVC(**cparams)
    if params['classifier_type'] == 'RFC':
        model = RandomForestClassifier()
    return model
txt_instance = 'its so hot out'
explainer = lime_text.LimeTextExplainer(class_names=np.unique(df_train['is_offensive']))
explained = explainer.explain_instance(txt_instance, model.predict_proba, num_features=5)
explained.show_in_notebook(text=txt_instance, predict_proba=True)
code
73075982/cell_31
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from lime import lime_text
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix,jaccard_score,f1_score,recall_score
from sklearn.metrics import plot_confusion_matrix,plot_roc_curve,brier_score_loss
from sklearn.metrics import plot_precision_recall_curve,classification_report
from sklearn.metrics import precision_score,roc_auc_score,log_loss
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import FunctionTransformer
from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.calibration as cal
import sklearn.feature_extraction.text as ft
import sklearn.feature_selection as fs
import sklearn.pipeline as pipe
import sklearn.svm as svm
import warnings
import pandas as pd
import sklearn.feature_extraction.text as ft
import sklearn.feature_selection as fs
from sklearn.model_selection import train_test_split, GridSearchCV
import sklearn.svm as svm
import sklearn.calibration as cal
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, brier_score_loss
from sklearn.metrics import plot_precision_recall_curve, classification_report
from sklearn.metrics import precision_score, roc_auc_score, log_loss
from sklearn.metrics import confusion_matrix, jaccard_score, f1_score, recall_score
from sklearn.preprocessing import FunctionTransformer
import seaborn as sns
import numpy as np
import time
import datetime
import sklearn.pipeline as pipe
from lime import lime_text
from nltk.corpus import stopwords
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import TruncatedSVD
import re
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from wordcloud import WordCloud, STOPWORDS
stop_words = set(stopwords.words('english'))
stopwords = set(STOPWORDS)
import warnings
warnings.filterwarnings('ignore', 'This pattern has match groups')
df = pd.read_csv('../input/d/brandonbenton/botezlive-chat-classification/botezlive_data.csv')
def split_data(df, params):
    df_ones = df.loc[df['is_offensive'] == 1]
    df_tmp = df.loc[df['is_offensive'] == 0]
    ratio = len(df_ones) / len(df_tmp)
    msk = np.random.rand(len(df_tmp)) < params['multiplier'] * ratio
    df_zeros = df_tmp.loc[msk]
    df = pd.concat([df_ones, df_zeros]).reset_index(drop=True)
    df_train, df_test = train_test_split(df, test_size=0.2, random_state=0)
    return (df_train, df_test)
def get_vectorizer(df_train, params):
    vparams = dict({'stop_words': stop_words, 'min_df': params['min_df'], 'max_df': params['max_df'], 'smooth_idf': params['smooth_idf'], 'analyzer': params['analyzer'], 'ngram_range': params['ngram_range'], 'sublinear_tf': params['sublinear_tf'], 'max_features': params['max_features']})
    vectorizer = ft.TfidfVectorizer(**vparams)
    x_train = vectorizer.fit_transform(df_train['text'])
    if params['trim features']:
        y = df_train['is_offensive']
        x_names = vectorizer.get_feature_names()
        p_value_limit = params['pvalue limit']
        dtf_features = pd.DataFrame()
        for cat in np.unique(y):
            chi2, p_value = fs.chi2(x_train, y == cat)
            entry = pd.DataFrame({'feature': x_names, 'score': 1 - p_value, 'y': cat})
            dtf_features = dtf_features.append(entry)
        dtf_features = dtf_features.sort_values(['y', 'score'], ascending=[True, False])
        dtf_features = dtf_features[dtf_features['score'] > p_value_limit]
        x_names = dtf_features['feature'].unique().tolist()
        vparams['vocabulary'] = x_names
        vectorizer = ft.TfidfVectorizer(**vparams)
    return vectorizer
def get_dual_vectorizer(df_train, p1, p2):
    vec1 = get_vectorizer(df_train, p1)
    vec2 = get_vectorizer(df_train, p2)
    features = set(vec1.get_feature_names()) | set(vec2.get_feature_names())
    features = list(features)
    vectorizer = ft.TfidfVectorizer(vocabulary=features, max_features=p1['max_features'])
    return vectorizer
def get_pca(params):
    pca = TruncatedSVD(n_components=params['n_components'])
    return pca
def transformer(x):
    return np.array(x.toarray())
def param_score(df, params):
    df_train, df_test = split_data(df, params)
    vectorizer = get_vectorizer(df_train, params)
    model = get_model(params)
    y_train = df_train['is_offensive']
    y_test = df_test['is_offensive']
    num_splits = 1
    if params['classifier_type'] == 'NN':
        nn = KerasClassifier(build_fn=lambda: model)
        nn._estimator_type = 'classifier'
        vectorizer.fit(df_train['text'])
        num_rows = df_train.shape[0]
        num_splits = 1
        if params['pca']:
            pca = get_pca(params)
            pca.fit(vectorizer.transform(df_train['text']))
        if num_splits == 1 and params['pca']:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits == 1 and (not params['pca']):
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits > 1 and params['pca']:
            pca.fit(vectorizer.transform(df_train['text']))
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(pca.transform(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
        elif num_splits > 1 and (not params['pca']):
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(transformer(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
    else:
        clf = cal.CalibratedClassifierCV(model, cv=p1['cv'], method='sigmoid')
        if params['pca']:
            pca = get_pca(params)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', clf)])
        else:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('classifier', clf)])
        my_pipeline.fit(df_train['text'], df_train['is_offensive'])
    score = my_pipeline.score(df_test['text'], df_test['is_offensive'])
    discrete_preds = my_pipeline.predict(df_test['text'])
    confusion = confusion_matrix(y_test, discrete_preds)
    test_ones = sum(confusion[1][:])
    test_zeros = sum(confusion[0][:])
    scores = dict({'classifier_type': params['classifier_type'], 'splits': num_splits, 'n_samples': df_train.shape[0], 'ngram_range': params['ngram_range'], 'pca': params['pca'], 'n_components': params['n_components'], 'max_features': params['max_features'], 'trim features': params['trim features'], 'pvalue limit': params['pvalue limit'], 'multiplier': params['multiplier'], 'precision': precision_score(y_test, discrete_preds), 'jaccard': jaccard_score(y_test, discrete_preds), 'recall': recall_score(y_test, discrete_preds), 'F1': f1_score(y_test, discrete_preds), 'TP': confusion[1][1] / test_ones, 'FP': confusion[0][1] / test_zeros, 'TN': confusion[0][0] / test_zeros, 'FN': confusion[1][0] / test_ones})
    return (my_pipeline, discrete_preds, df_train, df_test, scores)
def get_model(params):
    if params['classifier_type'] == 'NN':
        if params['pca']:
            feature_num = params['n_components']
        else:
            feature_num = params['max_features']
        model = Sequential()
        model.add(Dense(128, activation='relu', input_shape=(feature_num,)))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        model.summary()
    if params['classifier_type'] == 'SVM':
        cparams = dict({'max_iter': 1000, 'C': params['C'], 'class_weight': params['class_weight']})
        model = svm.LinearSVC(**cparams)
    if params['classifier_type'] == 'RFC':
        model = RandomForestClassifier()
    return model
txt_instance = 'its so hot out'
explainer = lime_text.LimeTextExplainer(class_names=np.unique(df_train['is_offensive']))
explained = explainer.explain_instance(txt_instance, model.predict_proba, num_features=5)
def plot_score_curves(clf, X, Y):
    ax = plt.gca()
    preds = clf.predict_proba(X)[:, 1]
    discrete_preds = clf.predict(X)
    clf_score = brier_score_loss(Y, preds, pos_label=1)
    frac_of_positives, mean_predicted_value = cal.calibration_curve(Y, preds, n_bins=20)
    clf_score = brier_score_loss(Y, discrete_preds, pos_label=1)
    frac_of_positives, mean_predicted_value = cal.calibration_curve(Y, discrete_preds, n_bins=20)
plot_score_curves(model, df_test['text'], df_test['is_offensive'])
code
73075982/cell_24
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import confusion_matrix,jaccard_score,f1_score,recall_score
from sklearn.metrics import precision_score,roc_auc_score,log_loss
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import FunctionTransformer
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from wordcloud import WordCloud, STOPWORDS
import datetime
import numpy as np
import pandas as pd
import sklearn.calibration as cal
import sklearn.feature_extraction.text as ft
import sklearn.feature_selection as fs
import sklearn.pipeline as pipe
import time
import warnings
import pandas as pd
import sklearn.feature_extraction.text as ft
import sklearn.feature_selection as fs
from sklearn.model_selection import train_test_split, GridSearchCV
import sklearn.svm as svm
import sklearn.calibration as cal
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, brier_score_loss
from sklearn.metrics import plot_precision_recall_curve, classification_report
from sklearn.metrics import precision_score, roc_auc_score, log_loss
from sklearn.metrics import confusion_matrix, jaccard_score, f1_score, recall_score
from sklearn.preprocessing import FunctionTransformer
import seaborn as sns
import numpy as np
import time
import datetime
import sklearn.pipeline as pipe
from lime import lime_text
from nltk.corpus import stopwords
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import TruncatedSVD
import re
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from wordcloud import WordCloud, STOPWORDS
stop_words = set(stopwords.words('english'))
stopwords = set(STOPWORDS)
import warnings
warnings.filterwarnings('ignore', 'This pattern has match groups')
df = pd.read_csv('../input/d/brandonbenton/botezlive-chat-classification/botezlive_data.csv')
def split_data(df, params):
    df_ones = df.loc[df['is_offensive'] == 1]
    df_tmp = df.loc[df['is_offensive'] == 0]
    ratio = len(df_ones) / len(df_tmp)
    msk = np.random.rand(len(df_tmp)) < params['multiplier'] * ratio
    df_zeros = df_tmp.loc[msk]
    df = pd.concat([df_ones, df_zeros]).reset_index(drop=True)
    df_train, df_test = train_test_split(df, test_size=0.2, random_state=0)
    return (df_train, df_test)
def get_vectorizer(df_train, params):
    vparams = dict({'stop_words': stop_words, 'min_df': params['min_df'], 'max_df': params['max_df'], 'smooth_idf': params['smooth_idf'], 'analyzer': params['analyzer'], 'ngram_range': params['ngram_range'], 'sublinear_tf': params['sublinear_tf'], 'max_features': params['max_features']})
    vectorizer = ft.TfidfVectorizer(**vparams)
    x_train = vectorizer.fit_transform(df_train['text'])
    if params['trim features']:
        y = df_train['is_offensive']
        x_names = vectorizer.get_feature_names()
        p_value_limit = params['pvalue limit']
        dtf_features = pd.DataFrame()
        for cat in np.unique(y):
            chi2, p_value = fs.chi2(x_train, y == cat)
            entry = pd.DataFrame({'feature': x_names, 'score': 1 - p_value, 'y': cat})
            dtf_features = dtf_features.append(entry)
        dtf_features = dtf_features.sort_values(['y', 'score'], ascending=[True, False])
        dtf_features = dtf_features[dtf_features['score'] > p_value_limit]
        x_names = dtf_features['feature'].unique().tolist()
        vparams['vocabulary'] = x_names
        vectorizer = ft.TfidfVectorizer(**vparams)
    return vectorizer
def get_dual_vectorizer(df_train, p1, p2):
    vec1 = get_vectorizer(df_train, p1)
    vec2 = get_vectorizer(df_train, p2)
    features = set(vec1.get_feature_names()) | set(vec2.get_feature_names())
    features = list(features)
    vectorizer = ft.TfidfVectorizer(vocabulary=features, max_features=p1['max_features'])
    return vectorizer
def get_pca(params):
    pca = TruncatedSVD(n_components=params['n_components'])
    return pca
def transformer(x):
    return np.array(x.toarray())
def param_score(df, params):
    df_train, df_test = split_data(df, params)
    vectorizer = get_vectorizer(df_train, params)
    model = get_model(params)
    y_train = df_train['is_offensive']
    y_test = df_test['is_offensive']
    num_splits = 1
    if params['classifier_type'] == 'NN':
        nn = KerasClassifier(build_fn=lambda: model)
        nn._estimator_type = 'classifier'
        vectorizer.fit(df_train['text'])
        num_rows = df_train.shape[0]
        num_splits = 1
        if params['pca']:
            pca = get_pca(params)
            pca.fit(vectorizer.transform(df_train['text']))
        if num_splits == 1 and params['pca']:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits == 1 and (not params['pca']):
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits > 1 and params['pca']:
            pca.fit(vectorizer.transform(df_train['text']))
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(pca.transform(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
        elif num_splits > 1 and (not params['pca']):
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(transformer(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
    else:
        clf = cal.CalibratedClassifierCV(model, cv=p1['cv'], method='sigmoid')
        if params['pca']:
            pca = get_pca(params)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', clf)])
        else:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('classifier', clf)])
        my_pipeline.fit(df_train['text'], df_train['is_offensive'])
    score = my_pipeline.score(df_test['text'], df_test['is_offensive'])
    discrete_preds = my_pipeline.predict(df_test['text'])
    confusion = confusion_matrix(y_test, discrete_preds)
    test_ones = sum(confusion[1][:])
    test_zeros = sum(confusion[0][:])
    scores = dict({'classifier_type': params['classifier_type'], 'splits': num_splits, 'n_samples': df_train.shape[0], 'ngram_range': params['ngram_range'], 'pca': params['pca'], 'n_components': params['n_components'], 'max_features': params['max_features'], 'trim features': params['trim features'], 'pvalue limit': params['pvalue limit'], 'multiplier': params['multiplier'], 'precision': precision_score(y_test, discrete_preds), 'jaccard': jaccard_score(y_test, discrete_preds), 'recall': recall_score(y_test, discrete_preds), 'F1': f1_score(y_test, discrete_preds), 'TP': confusion[1][1] / test_ones, 'FP': confusion[0][1] / test_zeros, 'TN': confusion[0][0] / test_zeros, 'FN': confusion[1][0] / test_ones})
    return (my_pipeline, discrete_preds, df_train, df_test, scores)
headers = ['classifier_type', 'splits', 'n_samples', 'ngram_range', 'trim features', 'pca', 'n_components', 'max_features', 'pvalue limit', 'multiplier', 'precision', 'jaccard', 'recall', 'F1', 'TP', 'FP', 'TN', 'FN']
df_scores = pd.DataFrame(columns=headers)
p1 = dict({'classifier_type': 'SVM', 'min_df': 1, 'max_df': 0.9, 'smooth_idf': 1, 'sublinear_tf': 1, 'ngram_range': (2, 10), 'max_features': None, 'C': 1.0, 'cv': 5, 'analyzer': 'char_wb', 'trim features': False, 'pvalue limit': 0.4, 'class_weight': None, 'multiplier': 1, 'n_components': 400, 'epochs': 10, 'pca': False})
p2 = p1.copy()
p2['classifier_type'] = 'NN'
p2['max_features'] = 20000
p2['pca'] = True
params_list = [p1]
for i, p in enumerate(params_list):
    start = time.time()
    model, discrete_preds, df_train, df_test, scores = param_score(df, p)
    df_scores = df_scores.append(scores, ignore_index=True)
    end = time.time()
    elapsed = end - start
    remaining_seconds = elapsed * (len(params_list) - i - 1)
df_scores['avg score'] = df_scores[['precision', 'recall', 'jaccard', 'F1', 'TP', 'TN']].values.mean(axis=1)
df_scores
code
73075982/cell_22
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import confusion_matrix,jaccard_score,f1_score,recall_score
from sklearn.metrics import precision_score,roc_auc_score,log_loss
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import FunctionTransformer
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from wordcloud import WordCloud, STOPWORDS
import datetime
import numpy as np
import pandas as pd
import sklearn.calibration as cal
import sklearn.feature_extraction.text as ft
import sklearn.feature_selection as fs
import sklearn.pipeline as pipe
import time
import warnings
import pandas as pd
import sklearn.feature_extraction.text as ft
import sklearn.feature_selection as fs
from sklearn.model_selection import train_test_split, GridSearchCV
import sklearn.svm as svm
import sklearn.calibration as cal
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, brier_score_loss
from sklearn.metrics import plot_precision_recall_curve, classification_report
from sklearn.metrics import precision_score, roc_auc_score, log_loss
from sklearn.metrics import confusion_matrix, jaccard_score, f1_score, recall_score
from sklearn.preprocessing import FunctionTransformer
import seaborn as sns
import numpy as np
import time
import datetime
import sklearn.pipeline as pipe
from lime import lime_text
from nltk.corpus import stopwords
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import TruncatedSVD
import re
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, GlobalMaxPooling1D, Conv1D, Embedding, LSTM
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from wordcloud import WordCloud, STOPWORDS
stop_words = set(stopwords.words('english'))
stopwords = set(STOPWORDS)
import warnings
warnings.filterwarnings('ignore', 'This pattern has match groups')
df = pd.read_csv('../input/d/brandonbenton/botezlive-chat-classification/botezlive_data.csv')
def split_data(df, params):
    df_ones = df.loc[df['is_offensive'] == 1]
    df_tmp = df.loc[df['is_offensive'] == 0]
    ratio = len(df_ones) / len(df_tmp)
    msk = np.random.rand(len(df_tmp)) < params['multiplier'] * ratio
    df_zeros = df_tmp.loc[msk]
    df = pd.concat([df_ones, df_zeros]).reset_index(drop=True)
    df_train, df_test = train_test_split(df, test_size=0.2, random_state=0)
    return (df_train, df_test)
def get_vectorizer(df_train, params):
    vparams = dict({'stop_words': stop_words, 'min_df': params['min_df'], 'max_df': params['max_df'], 'smooth_idf': params['smooth_idf'], 'analyzer': params['analyzer'], 'ngram_range': params['ngram_range'], 'sublinear_tf': params['sublinear_tf'], 'max_features': params['max_features']})
    vectorizer = ft.TfidfVectorizer(**vparams)
    x_train = vectorizer.fit_transform(df_train['text'])
    if params['trim features']:
        y = df_train['is_offensive']
        x_names = vectorizer.get_feature_names()
        p_value_limit = params['pvalue limit']
        dtf_features = pd.DataFrame()
        for cat in np.unique(y):
            chi2, p_value = fs.chi2(x_train, y == cat)
            entry = pd.DataFrame({'feature': x_names, 'score': 1 - p_value, 'y': cat})
            dtf_features = dtf_features.append(entry)
        dtf_features = dtf_features.sort_values(['y', 'score'], ascending=[True, False])
        dtf_features = dtf_features[dtf_features['score'] > p_value_limit]
        x_names = dtf_features['feature'].unique().tolist()
        vparams['vocabulary'] = x_names
        vectorizer = ft.TfidfVectorizer(**vparams)
    return vectorizer
def get_dual_vectorizer(df_train, p1, p2):
    vec1 = get_vectorizer(df_train, p1)
    vec2 = get_vectorizer(df_train, p2)
    features = set(vec1.get_feature_names()) | set(vec2.get_feature_names())
    features = list(features)
    vectorizer = ft.TfidfVectorizer(vocabulary=features, max_features=p1['max_features'])
    return vectorizer
def get_pca(params):
    pca = TruncatedSVD(n_components=params['n_components'])
    return pca
def transformer(x):
    return np.array(x.toarray())
def param_score(df, params):
    df_train, df_test = split_data(df, params)
    vectorizer = get_vectorizer(df_train, params)
    model = get_model(params)
    y_train = df_train['is_offensive']
    y_test = df_test['is_offensive']
    num_splits = 1
    if params['classifier_type'] == 'NN':
        nn = KerasClassifier(build_fn=lambda: model)
        nn._estimator_type = 'classifier'
        vectorizer.fit(df_train['text'])
        num_rows = df_train.shape[0]
        num_splits = 1
        if params['pca']:
            pca = get_pca(params)
            pca.fit(vectorizer.transform(df_train['text']))
        if num_splits == 1 and params['pca']:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits == 1 and (not params['pca']):
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
            my_pipeline.fit(df_train['text'], df_train['is_offensive'], classifier__verbose=1, classifier__epochs=params['epochs'], classifier__batch_size=4)
        elif num_splits > 1 and params['pca']:
            pca.fit(vectorizer.transform(df_train['text']))
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(pca.transform(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', nn)])
        elif num_splits > 1 and (not params['pca']):
            df_array = np.array_split(df_train, num_splits)
            for i in range(num_splits):
                df_tmp = df_array[i]
                nn.fit(transformer(vectorizer.transform(df_tmp['text'])), df_tmp['is_offensive'], verbose=1, epochs=params['epochs'], batch_size=4)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('transformer', FunctionTransformer(transformer)), ('classifier', nn)])
    else:
        clf = cal.CalibratedClassifierCV(model, cv=p1['cv'], method='sigmoid')
        if params['pca']:
            pca = get_pca(params)
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('pca', pca), ('classifier', clf)])
        else:
            my_pipeline = pipe.Pipeline([('vectorizer', vectorizer), ('classifier', clf)])
        my_pipeline.fit(df_train['text'], df_train['is_offensive'])
    score = my_pipeline.score(df_test['text'], df_test['is_offensive'])
    discrete_preds = my_pipeline.predict(df_test['text'])
    confusion = confusion_matrix(y_test, discrete_preds)
    test_ones = sum(confusion[1][:])
    test_zeros = sum(confusion[0][:])
    scores = dict({'classifier_type': params['classifier_type'], 'splits': num_splits, 'n_samples': df_train.shape[0], 'ngram_range': params['ngram_range'], 'pca': params['pca'], 'n_components': params['n_components'], 'max_features': params['max_features'], 'trim features': params['trim features'], 'pvalue limit': params['pvalue limit'], 'multiplier': params['multiplier'], 'precision': precision_score(y_test, discrete_preds), 'jaccard': jaccard_score(y_test, discrete_preds), 'recall': recall_score(y_test, discrete_preds), 'F1': f1_score(y_test, discrete_preds), 'TP': confusion[1][1] / test_ones, 'FP': confusion[0][1] / test_zeros, 'TN': confusion[0][0] / test_zeros, 'FN': confusion[1][0] / test_ones})
    return (my_pipeline, discrete_preds, df_train, df_test, scores)
headers = ['classifier_type', 'splits', 'n_samples', 'ngram_range', 'trim features', 'pca', 'n_components', 'max_features', 'pvalue limit', 'multiplier', 'precision', 'jaccard', 'recall', 'F1', 'TP', 'FP', 'TN', 'FN']
df_scores = pd.DataFrame(columns=headers)
p1 = dict({'classifier_type': 'SVM', 'min_df': 1, 'max_df': 0.9, 'smooth_idf': 1, 'sublinear_tf': 1, 'ngram_range': (2, 10), 'max_features': None, 'C': 1.0, 'cv': 5, 'analyzer': 'char_wb', 'trim features': False, 'pvalue limit': 0.4, 'class_weight': None, 'multiplier': 1, 'n_components': 400, 'epochs': 10, 'pca': False})
p2 = p1.copy()
p2['classifier_type'] = 'NN'
p2['max_features'] = 20000
p2['pca'] = True
params_list = [p1]
for i, p in enumerate(params_list):
    start = time.time()
    model, discrete_preds, df_train, df_test, scores = param_score(df, p)
    df_scores = df_scores.append(scores, ignore_index=True)
    end = time.time()
    elapsed = end - start
    remaining_seconds = elapsed * (len(params_list) - i - 1)
    print('Done: {:.5f}, Remaining: {:<25}'.format((i + 1) / len(params_list), str(datetime.timedelta(seconds=remaining_seconds))), end='\r')
code
16148304/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn import svm
from keras.models import Sequential
from keras.layers import Dense, Conv2DTranspose, Conv2D, MaxPooling2D
from keras.layers import Dropout, Flatten
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
code
16148304/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
Y_train = train['label']
X_train = train.drop(labels=['label'], axis=1)
X_train = X_train / 255.0
test = test / 255.0
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
g = plt.imshow(X_train[10][:, :, 0])
code
16148304/cell_17
[ "image_output_1.png" ]
from keras.layers import Dense, Conv2DTranspose,Conv2D, MaxPooling2D
from keras.layers import Dropout, Flatten
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
Y_train = train['label']
X_train = train.drop(labels=['label'], axis=1)
X_train = X_train / 255.0
test = test / 255.0
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
Y_train = to_categorical(Y_train, num_classes=10)
def baseline_model():
    model = Sequential()
    model.add(Conv2D(32, 3, input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, 3, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, 3, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, 3, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    return model
model = baseline_model()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False)
datagen.fit(X_train)
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=50), epochs=3, validation_data=(X_val, Y_val), verbose=1, steps_per_epoch=X_train.shape[0] // 50)
code
129027504/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
code
129027504/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.head()
code
129027504/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
data.dtypes
lables = data['similar']
x_pos = data[['question1', 'question2', 'similar']][lables == 1]
x_pos
x_neg = data[['question1', 'question2', 'similar']][lables == 0]
x_neg
print(x_pos.shape)
print(x_neg.shape)
code
129027504/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
data.dtypes
lables = data['similar']
x_pos = data[['question1', 'question2', 'similar']][lables == 1]
x_pos
code
129027504/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
data.dtypes
lables = data['similar']
x_pos = data[['question1', 'question2', 'similar']][lables == 1]
x_pos
x_neg = data[['question1', 'question2', 'similar']][lables == 0]
x_neg
dff = pd.concat([x_pos[:32000], x_neg[:32000]], axis=0)
dff.shape
dff
code
129027504/cell_29
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
data.dtypes
lables = data['similar']
x_pos = data[['question1', 'question2', 'similar']][lables == 1]
x_pos
x_neg = data[['question1', 'question2', 'similar']][lables == 0]
x_neg
dff = pd.concat([x_pos[:32000], x_neg[:32000]], axis=0)
dff.shape
code
129027504/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import string
import nltk
import pickle
from nltk import word_tokenize
from nltk.stem import PorterStemmer
from tqdm import tqdm
!pip install gradio
import gradio as gr
ps = PorterStemmer() # stemming
stopwords = nltk.corpus.stopwords.words('english')
warnings.filterwarnings('ignore')
code
129027504/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
print(len(data))
data.dtypes
code
129027504/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
data.dtypes
lables = data['similar']
x_pos = data[['question1', 'question2', 'similar']][lables == 1]
x_pos
x_neg = data[['question1', 'question2', 'similar']][lables == 0]
x_neg
dff = pd.concat([x_pos[:32000], x_neg[:32000]], axis=0)
dff.shape
dff = dff.sample(frac=1).reset_index(drop=True)
dff
code
129027504/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
code
129027504/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
print(len(data))
code
129027504/cell_24
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
data.dtypes
mask = data.isnull().any(axis=1)
data = data[~mask]
data.dtypes
lables = data['similar']
x_neg = data[['question1', 'question2', 'similar']][lables == 0]
x_neg
code
129027504/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
data[data.question1.isnull()]
data[data.question2.isnull()]
code
129027504/cell_10
[ "text_plain_output_35.png", "text_plain_output_43.png", "text_plain_output_37.png", "text_plain_output_5.png", "text_plain_output_48.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_40.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_49.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_47.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_42.png", "text_plain_output_23.png", "text_plain_output_51.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_33.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_39.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_46.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
print(data['similar'].value_counts())
print(data['similar'].value_counts() / data['similar'].count() * 100)
data['similar'].value_counts().plot(kind='bar')
code
129027504/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.rename(columns={'is_duplicate': 'similar'}, inplace=True)
data.drop(['id', 'qid1', 'qid2'], axis=1, inplace=True)
data.isnull().sum()
code
129027504/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
pd.set_option('display.max_colwidth', 100)
data.head()
code
73083395/cell_25
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
preds = aml.predict(test_hf)
preds_df = h2o.as_list(preds)
preds_df
code
73083395/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
ypred = automl.predict(test_df.values)
code
73083395/cell_23
[ "text_html_output_1.png", "text_plain_output_1.png" ]
train_hf = h2o.H2OFrame(train_df.copy())
test_hf = h2o.H2OFrame(test_df.copy())
code
73083395/cell_33
[ "text_plain_output_1.png" ]
print('Best ML leaner:', automl.best_estimator)
print('Best hyperparmeter config:', automl.best_config)
print('Best accuracy on validation data: {0:.4g}'.format(1 - automl.best_loss))
print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))
code
73083395/cell_29
[ "text_html_output_1.png", "text_plain_output_1.png" ]
!pip install -U flaml
code
73083395/cell_11
[ "text_plain_output_1.png" ]
from lightautoml.automl.presets.tabular_presets import TabularAutoML, TabularUtilizedAutoML
from lightautoml.tasks import Task
import torch
code
73083395/cell_32
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
automl = AutoML()
automl_settings = {'time_budget': 1200, 'metric': 'rmse', 'task': 'regression', 'seed': 2021, 'log_file_name': 'tpsaug21log.log'}
automl.fit(X_train=X, y_train=y, **automl_settings)
code
73083395/cell_16
[ "text_plain_output_1.png" ]
automl = TabularAutoML(task=task, timeout=TIMEOUT, cpu_limit=N_THREADS, reader_params={'n_jobs': N_THREADS, 'cv': N_FOLDS, 'random_state': RANDOM_STATE}, general_params={'use_algos': [['linear_l2', 'cb', 'lgb', 'lgb_tuned']]}, lgb_params={'default_params': lgb_params, 'freeze_defaults': True}, cb_params={'default_params': cb_params, 'freeze_defaults': True}, verbose=2)
code
73083395/cell_3
[ "text_plain_output_1.png" ]
!pip install scikit-learn --upgrade
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
code
73083395/cell_17
[ "text_plain_output_1.png" ]
oof_pred = automl.fit_predict(train_df, roles=roles)
test_pred = automl.predict(test_df)
code
73083395/cell_35
[ "application_vnd.jupyter.stderr_output_1.png" ]
ypred
code
73083395/cell_24
[ "text_plain_output_1.png" ]
aml = H2OAutoML(seed=2021, max_runtime_secs=1200, sort_metric='RMSE')
aml.train(x=train_hf.columns, y='loss', training_frame=train_hf)
lb = aml.leaderboard
lb.head(rows=lb.nrows)
code
73083395/cell_22
[ "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import h2o
h2o.init()
code
73083395/cell_10
[ "text_plain_output_1.png" ]
!pip install -U lightautoml
code
90124933/cell_4
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_orig = pd.read_csv('/kaggle/input/daily-min-temperatures/daily-min-temperatures.csv')
data_orig['Date'] = pd.to_datetime(data_orig['Date'])
ax = data_orig.plot(x='Date', y='Temp', figsize=(12, 6))
code
90124933/cell_6
[ "image_output_1.png" ]
from statsmodels.tsa.seasonal import seasonal_decompose
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_orig = pd.read_csv('/kaggle/input/daily-min-temperatures/daily-min-temperatures.csv')
data_orig['Date'] = pd.to_datetime(data_orig['Date']) # convert date column to datetime
ax = data_orig.plot(x='Date', y='Temp', figsize=(12,6))
ax = data_orig.plot(x='Date', y='Temp', figsize=(12,6))
xcoords = ['1981-01-01', '1982-01-01', '1983-01-01', '1984-01-01', '1985-01-01', '1986-01-01', '1987-01-01', '1988-01-01', '1989-01-01', '1990-01-01', '1990-12-31']
for xc in xcoords:
    plt.axvline(x=xc, color='black', linestyle='--')
# The temperature seem to go down in the beginning of every year.
# And then slowly go up until the mid of year during which the temperature peaks.
# And again it starts to dip slowly until the end of year.
# We can see this repetitive seasonal trend.
from statsmodels.tsa.seasonal import seasonal_decompose
data_orig.set_index('Date', inplace=True)
data_orig.sort_index(inplace=True)
analysis = data_orig[['Temp']].copy()
decompose_result_mult = seasonal_decompose(analysis, model='additive', period=365)
decompose_result_mult.plot()
code
90124933/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_orig = pd.read_csv('/kaggle/input/daily-min-temperatures/daily-min-temperatures.csv')
print(data_orig.count)
data_orig.head()
code
90124933/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90124933/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_orig = pd.read_csv('/kaggle/input/daily-min-temperatures/daily-min-temperatures.csv')
data_orig['Temp'].isnull().values.any()
code
90124933/cell_5
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_orig = pd.read_csv('/kaggle/input/daily-min-temperatures/daily-min-temperatures.csv')
data_orig['Date'] = pd.to_datetime(data_orig['Date']) # convert date column to datetime
ax = data_orig.plot(x='Date', y='Temp', figsize=(12,6))
ax = data_orig.plot(x='Date', y='Temp', figsize=(12, 6))
xcoords = ['1981-01-01', '1982-01-01', '1983-01-01', '1984-01-01', '1985-01-01', '1986-01-01', '1987-01-01', '1988-01-01', '1989-01-01', '1990-01-01', '1990-12-31']
for xc in xcoords:
    plt.axvline(x=xc, color='black', linestyle='--')
code
32062359/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from langdetect import detect
from nltk.tokenize import sent_tokenize,word_tokenize
from sklearn.model_selection import train_test_split
from snorkel.labeling import PandasLFApplier,LFAnalysis,LabelingFunction
from snorkel.labeling.model.label_model import LabelModel
from tqdm import tqdm
import pandas as pd
import pandas as pd
keywordlist = ['inhibitor']
def loopsearch(keywordlist, researchpaperfu):
    alldataframeco = pd.DataFrame()
    alldataframenoco = pd.DataFrame()
    allcopid = []
    allnocopid = []
    for i in tqdm(keywordlist):
        covinf = researchpaperfu.covid_related().search(i, num_results=1000, covid_related=False, view='table').results[['cord_uid', 'title', 'abstract']]
        notcovinf = researchpaperfu.not_covid_related().search(i, num_results=10000, covid_related=False, view='table').results[['cord_uid', 'title', 'abstract']]
        covinfpid = list(covinf.cord_uid.values)
        notcovinfpid = list(notcovinf.cord_uid.values)
        alldataframeco = pd.concat([covinf, alldataframeco])
        alldataframenoco = pd.concat([notcovinf, alldataframenoco])
        allcopid.append(covinfpid)
        allnocopid.append(notcovinfpid)
    alldataframeco = alldataframeco.drop_duplicates()
    alldataframenoco = alldataframenoco.drop_duplicates()
    return (allcopid, allnocopid, alldataframeco, alldataframenoco)
fullab = pd.concat([allcoab, allnocoab])
fullab = fullab.rename(columns={'cord_uid': 'pid'})
fullab = fullab[fullab.abstract != '']
lan = []
for i in fullab.abstract:
    lan1 = detect(i)
    lan.append(lan1)
fullab['lan'] = lan
fullab = fullab[fullab.lan == 'en']
fullab = fullab[['pid', 'title', 'abstract']]
question = 'Q1'
question_dir = question + '/'
keylist = pd.read_csv('/kaggle/input/kagglecovid19literature/results/' + question_dir + 'keylist.txt').columns.values
valuelist = pd.read_csv('/kaggle/input/kagglecovid19literature/results/' + question_dir + 'valuelist.txt', header=None).values
viruslist = pd.read_csv('/kaggle/input/kagglecovid19literature/results/' + question_dir + 'viruslist.txt', header=None).values
def build_raw_data(file):
    def retunsb(sentlist, i, lennu):
        sent = sentlist[i]
        if i - 1 < 0:
            present = ''
        else:
            present = sentlist[i - 1]
        if i + 1 >= lennu:
            aftsent = ''
        else:
            aftsent = sentlist[i + 1]
        tempsent = ''
        tempsent = tempsent.join([present, sent, aftsent])
        return tempsent
    allfile = file
    allfile['abstract'] = allfile.abstract.astype(str)
    allsent = []
    allid = []
    allab = []
    for i in tqdm(range(len(allfile))):
        temp = allfile.abstract.iloc[i]
        temp = sent_tokenize(temp)
        for j in range(len(temp)):
            tempab = retunsb(temp, j, len(temp))
            allsent.append(temp[j])
            allid.append(allfile.pid.iloc[i])
            allab.append(tempab)
    allsent = pd.DataFrame(allsent, columns=['sent'])
    allsent['pid'] = allid
    allsent['abstract'] = allab
    return (allfile, allsent)
def loop_labing(keylist, valuelist, virus):
    def keyword_lookup(x, keywords, virus, label):
        if any((word in x.sent.lower() for word in keywords)) and any((word in x.sent.lower() for word in virus)):
            return label
        return Norelevent
    def make_keyword_lf(keywords, virus, name, label=None):
        return LabelingFunction(name=f'keyword_{name}', f=keyword_lookup, resources=dict(keywords=keywords, virus=virus, label=label))
    def keyword_lookup1(x, keywords, virus, label):
        if not any((word in x.sent.lower() for word in keywords)) and any((word in x.sent.lower() for word in virus)):
            return label
        return Norelevent
    def make_keyword_lf1(keywords, virus, name, label=None):
        return LabelingFunction(name=f'keyword_{name}', f=keyword_lookup1, resources=dict(keywords=keywords, virus=virus, label=label))
    def abstract_lookup(x, keywords, virus, label):
        if any((word in x.abstract.lower() for word in keywords)) and any((word in x.abstract.lower() for word in virus)):
            return label
        return Norelevent
    def make_abstract_lf(keywords, virus, name, label=None):
        return LabelingFunction(name=f'abstract_{name}', f=abstract_lookup, resources=dict(keywords=keywords, virus=virus, label=label))
    Norelevent = -1
    allweaklabf = []
    viruselist = virus
    for i in range(len(keylist)):
        labelvalue = 1
        vbname = keylist[i]
        vbnameab = vbname + 'su'
        globals()[vbname] = make_keyword_lf(keywords=valuelist[i], virus=viruselist, name=vbnameab, label=labelvalue)
        vbname1 = keylist[i] + 'ab'
        vbnameab1 = vbname + 'su1'
        globals()[vbname1] = make_abstract_lf(keywords=valuelist[i], virus=viruselist, name=vbnameab1, label=labelvalue)
        vbname2 = keylist[i] + 'sent'
        vbnameab2 = vbname + 'sentno'
        globals()[vbname2] = make_keyword_lf1(keywords=valuelist[i], virus=viruselist, name=vbnameab2, label=0)
        allweaklabf.append(globals()[vbname])
        allweaklabf.append(globals()[vbname1])
        allweaklabf.append(globals()[vbname2])
    return allweaklabf
def snorkel_process(keylist, dataframe, allweaklabf):
    def func(x):
        idx = (-x).argsort()[1:]
        x[idx] = 0
        return x
    cardinalitynu = 2
    applier = PandasLFApplier(lfs=allweaklabf)
    all_train_l = applier.apply(df=dataframe)
    report = LFAnalysis(L=all_train_l, lfs=allweaklabf).lf_summary()
    label_model = LabelModel(cardinality=cardinalitynu, verbose=False)
    label_model.fit(all_train_l)
    predt = label_model.predict(all_train_l)
    dataframe['L_label'] = predt
    dataframe = dataframe[dataframe.L_label >= 0]
    train, test = train_test_split(dataframe, test_size=0.2, random_state=234)
    trainsent = train.sent.values
    trainlabel = train.L_label.values
    testsent = test.sent.values
    testlabel = test.L_label.values
    return (trainsent, trainlabel, testsent, testlabel, keylist, report)
allweaklabf = loop_labing(keylist, valuelist, viruslist)
trainsent, trainlabel, valsent, vallabel, keylist, report = snorkel_process(keylist, allsent, allweaklabf)
testsent = allsent.sent.values
testlabel = allsent.newpid.values
code
32062359/cell_2
[ "text_plain_output_1.png" ]
#!pip install -U git+https://github.com/dgunning/cord19.git
!pip install -U git+https://github.com/dgunning/cord19.git@5b68c9a807f74f529b34d959f584712520da2f03
!pip install langdetect
!pip install pandas
!pip install tqdm
!pip install snorkel
code