path: string (lengths 13-17)
screenshot_names: sequence (lengths 1-873)
code: string (lengths 0-40.4k)
cell_type: string class (1 value)
105183805/cell_8
[ "text_plain_output_1.png" ]
from efficientnet_pytorch import EfficientNet

IMSIZE = 545
IMSIZE = EfficientNet.get_image_size('efficientnet-b5')
print(IMSIZE)
code
105183805/cell_15
[ "text_plain_output_1.png" ]
from efficientnet_pytorch import EfficientNet

IMSIZE = 545
IMSIZE = EfficientNet.get_image_size('efficientnet-b5')
model_efficient = EfficientNet.from_pretrained('efficientnet-b7')
code
105183805/cell_17
[ "text_html_output_1.png" ]
from albumentations import Compose, HorizontalFlip, VerticalFlip, ShiftScaleRotate, OneOf, Emboss, Sharpen, Blur, PiecewiseAffine, Normalize, augmentations
from albumentations.pytorch.transforms import ToTensorV2
from efficientnet_pytorch import EfficientNet
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from torch.utils.data import Dataset, DataLoader
from transformers import get_cosine_schedule_with_warmup
import cv2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
df['labels'] = df['labels'].apply(lambda string: string.split(' '))
s = list(df['labels'])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
trainx.insert(0, 'image', df['image'], True)
t_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/sample_submission.csv')
test_df = t_df.drop(['labels'], axis=1)

train_df = trainx
train_df.reset_index(drop=True, inplace=True)
test_df.reset_index(drop=True, inplace=True)

class CustomDataset(Dataset):

    def __init__(self, df, root_dir, transform=None, iftest=False):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform
        self.iftest = iftest

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = self.root_dir + self.df.iloc[idx, 0]
        image = cv2.imread(img_name, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image = self.transform(image=image)['image']
        if self.iftest:
            return image
        labels = torch.tensor(np.argmax(self.df.iloc[idx, 1:].values))  # multi-label rows collapsed to a single class index
        return (image, labels)

IMSIZE = EfficientNet.get_image_size('efficientnet-b5')
train_dataset = CustomDataset(df=train_df, root_dir='../input/plant-pathology-2021-fgvc8/train_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(rotate_limit=25.0, p=0.7), OneOf([Emboss(p=1), Sharpen(p=1), Blur(p=1)], p=0.5), PiecewiseAffine(p=0.5), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]))
test_dataset = CustomDataset(df=test_df, root_dir='../input/plant-pathology-2021-fgvc8/test_images/', transform=Compose([augmentations.geometric.resize.Resize(height=IMSIZE, width=IMSIZE, always_apply=True), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True), ToTensorV2()]), iftest=True)
BATCH_SIZE = 1
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'  # fall back to CPU so device is always defined

model_efficient = EfficientNet.from_pretrained('efficientnet-b7')
model_efficient._fc = nn.Sequential(nn.Linear(model_efficient._fc.in_features, 1000, bias=True), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(1000, 6, bias=True))
model_efficient = model_efficient.to(device)

NEPOCHS = 1
print(IMSIZE)
criterion_transfer = nn.CrossEntropyLoss()
learning_rate = 0.0008
optimizer_transfer = optim.AdamW(model_efficient.parameters(), learning_rate, weight_decay=0.001)
num_train_steps = int(len(train_dataset) / BATCH_SIZE * NEPOCHS)
scheduler = get_cosine_schedule_with_warmup(optimizer_transfer, num_warmup_steps=int(len(train_dataset) / BATCH_SIZE * 5), num_training_steps=num_train_steps)  # warmup cast to int; note it exceeds num_train_steps when NEPOCHS = 1
code
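Note on the scheduler arithmetic in the cell above: with NEPOCHS = 1 the warmup span (five epochs' worth of steps) is longer than training itself, so the cosine-decay phase is never reached. A minimal sketch of the two step counts, assuming the 2021 train split has roughly 18.6k rows (an assumption, not taken from this extract):

n_train = 18632  # assumed row count of train.csv
BATCH_SIZE, NEPOCHS = 1, 1
num_train_steps = int(n_train / BATCH_SIZE * NEPOCHS)  # 18632
num_warmup_steps = int(n_train / BATCH_SIZE * 5)       # 93160, already past num_train_steps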
105183805/cell_5
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd

df = pd.read_csv('../input/plant-pathology-2021-fgvc8/train.csv')
df['labels'] = df['labels'].apply(lambda string: string.split(' '))
s = list(df['labels'])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
trainx.insert(0, 'image', df['image'], True)
trainx
t_df = pd.read_csv('../input/plant-pathology-2021-fgvc8/sample_submission.csv')
test_df = t_df.drop(['labels'], axis=1)
test_df
code
121154019/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)

def aggregate(df, cols):
    by_cols = df.groupby(cols).agg({'Survived': [('Total', 'count'), ('Survived', 'sum')]})
    by_cols.columns = by_cols.columns.droplevel()
    by_cols['Died'] = by_cols['Total'] - by_cols['Survived']
    by_cols['Survive Rate'] = 100 * by_cols['Survived'] / by_cols['Total']
    return by_cols.sort_values('Total', ascending=False)

aggregate(new_train, ['Male'])
code
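The tuple form inside .agg() above is pandas' named-aggregation spelling; a self-contained toy run of the same pattern (hypothetical data, not from the Titanic files):

import pandas as pd

toy = pd.DataFrame({'Male': [True, True, False], 'Survived': [0, 1, 1]})
out = toy.groupby('Male').agg({'Survived': [('Total', 'count'), ('Survived', 'sum')]})
out.columns = out.columns.droplevel()  # flatten the ('Survived', ...) MultiIndex
out['Survive Rate'] = 100 * out['Survived'] / out['Total']
print(out)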
121154019/cell_25
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import xgboost as xgb

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

# X_train_final, y_train, X_test_final, y_test come from earlier cells
# (wrangle + train_test_split + encode) that this extract does not repeat
xgb_classifier = xgb.XGBClassifier()
xgb_classifier.fit(X_train_final, y_train)
y_pred = xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
code
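Since encode() recurs in several cells below, a toy run showing its output shape; note scikit-learn renamed sparse= to sparse_output= in 1.2, so this sketch uses the newer spelling (adjust for the notebook's older version):

import pandas as pd
from sklearn.preprocessing import OneHotEncoder

X = pd.DataFrame({'Embarked': ['S', 'C', 'Q'], 'Fare': [7.25, 71.28, 8.46]})
enc = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
ohe = enc.fit_transform(X[['Embarked']])
X_final = pd.concat([X.drop('Embarked', axis=1), pd.DataFrame(ohe, index=X.index)], axis=1)
print(X_final.columns.tolist())  # ['Fare', 0, 1, 2]: one-hot columns get integer names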
121154019/cell_33
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import xgboost as xgb

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
new_train['AgeGroup'] = pd.cut(new_train['Age'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80])
age_survival = new_train.groupby(['AgeGroup'])['Survived'].mean().reset_index()
new_train['FareGroup'] = pd.cut(new_train['Fare'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 600])
age_survival = new_train.groupby(['FareGroup'])['Survived'].mean().reset_index()
new_train = new_train.drop(['AgeGroup', 'FareGroup'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(new_train.drop('Survived', axis=1), new_train['Survived'], test_size=0.2, random_state=42)
cat_cols = ['Embarked', 'Pronoun', 'Prefix', 'Letter']
X_train_final, X_test_final, encoder = encode(X_train, X_test, cat_cols)
xgb_classifier = xgb.XGBClassifier()
xgb_classifier.fit(X_train_final, y_train)
y_pred = xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
param_grid = {'max_depth': [3, 4, 5], 'learning_rate': [0.1, 0.01], 'n_estimators': [100, 200], 'gamma': [0, 0.1], 'colsample_bytree': [0.6, 0.8]}
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
best_xgb_classifier = grid_search.best_estimator_
y_pred = best_xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
new_test = wrangle(test)
X_train_final, X_test_final, encoder = encode(new_train.drop('Survived', axis=1), new_test, cat_cols)
y_train = new_train['Survived']
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
final_xgb_classifier = grid_search.best_estimator_
importance = final_xgb_classifier.feature_importances_
features = X_train_final.columns
encodings = encoder.get_feature_names()  # get_feature_names_out() on scikit-learn >= 1.0
translated = []
for feature in features:
    if feature in range(len(encodings)):  # integer column labels 0..n-1 are the one-hot columns
        translated.append(encodings[feature])
    else:
        translated.append(feature)
df_importance = pd.DataFrame({'Feature': translated, 'Importance': importance}).sort_values('Importance', ascending=False)
df_importance.head(10)
code
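On scikit-learn >= 1.0 the index-based name recovery in the loop above can be done directly with get_feature_names_out(); a standalone check of what it returns:

import pandas as pd
from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(pd.DataFrame({'Embarked': ['S', 'C', 'Q']}))
print(enc.get_feature_names_out(['Embarked']))  # ['Embarked_C' 'Embarked_Q' 'Embarked_S']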
121154019/cell_20
[ "image_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt  # needed for the plt calls below
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
new_train['AgeGroup'] = pd.cut(new_train['Age'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80])
age_survival = new_train.groupby(['AgeGroup'])['Survived'].mean().reset_index()
new_train['FareGroup'] = pd.cut(new_train['Fare'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 600])
age_survival = new_train.groupby(['FareGroup'])['Survived'].mean().reset_index()  # variable reused for the fare grouping
sns.barplot(x='FareGroup', y='Survived', data=age_survival)
plt.title('Survival Rate by Fare Price')
plt.xlabel('Fare Group')  # was mislabeled 'Age Group'
plt.ylabel('Survival Rate')
plt.show()
new_train = new_train.drop(['AgeGroup', 'FareGroup'], axis=1)
code
121154019/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt  # needed for the plt calls below
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
new_train['AgeGroup'] = pd.cut(new_train['Age'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80])
age_survival = new_train.groupby(['AgeGroup'])['Survived'].mean().reset_index()
sns.barplot(x='AgeGroup', y='Survived', data=age_survival)
plt.title('Survival Rate by Age Group')
plt.xlabel('Age Group')
plt.ylabel('Survival Rate')
plt.show()
code
121154019/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
sns.histplot(new_train['Fare'], bins=50)
code
121154019/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import xgboost as xgb

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
new_train['AgeGroup'] = pd.cut(new_train['Age'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80])
age_survival = new_train.groupby(['AgeGroup'])['Survived'].mean().reset_index()
new_train['FareGroup'] = pd.cut(new_train['Fare'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 600])
age_survival = new_train.groupby(['FareGroup'])['Survived'].mean().reset_index()
new_train = new_train.drop(['AgeGroup', 'FareGroup'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(new_train.drop('Survived', axis=1), new_train['Survived'], test_size=0.2, random_state=42)
cat_cols = ['Embarked', 'Pronoun', 'Prefix', 'Letter']
X_train_final, X_test_final, encoder = encode(X_train, X_test, cat_cols)
xgb_classifier = xgb.XGBClassifier()
xgb_classifier.fit(X_train_final, y_train)
y_pred = xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
param_grid = {'max_depth': [3, 4, 5], 'learning_rate': [0.1, 0.01], 'n_estimators': [100, 200], 'gamma': [0, 0.1], 'colsample_bytree': [0.6, 0.8]}
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
best_xgb_classifier = grid_search.best_estimator_
y_pred = best_xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
new_test = wrangle(test)
X_train_final, X_test_final, encoder = encode(new_train.drop('Survived', axis=1), new_test, cat_cols)
y_train = new_train['Survived']
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
final_xgb_classifier = grid_search.best_estimator_
print('Best hyperparameters:', grid_search.best_params_)
code
121154019/cell_28
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import xgboost as xgb

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

# X_train_final, y_train, X_test_final, y_test come from earlier cells
# (wrangle + train_test_split + encode) that this extract does not repeat
xgb_classifier = xgb.XGBClassifier()
xgb_classifier.fit(X_train_final, y_train)
y_pred = xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
param_grid = {'max_depth': [3, 4, 5], 'learning_rate': [0.1, 0.01], 'n_estimators': [100, 200], 'gamma': [0, 0.1], 'colsample_bytree': [0.6, 0.8]}
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
best_xgb_classifier = grid_search.best_estimator_
y_pred = best_xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy with best hyperparameters:', accuracy)
code
121154019/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)

def aggregate(df, cols):
    by_cols = df.groupby(cols).agg({'Survived': [('Total', 'count'), ('Survived', 'sum')]})
    by_cols.columns = by_cols.columns.droplevel()
    by_cols['Died'] = by_cols['Total'] - by_cols['Survived']
    by_cols['Survive Rate'] = 100 * by_cols['Survived'] / by_cols['Total']
    return by_cols.sort_values('Total', ascending=False)

aggregate(new_train, ['Pronoun'])
code
121154019/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)

def aggregate(df, cols):
    by_cols = df.groupby(cols).agg({'Survived': [('Total', 'count'), ('Survived', 'sum')]})
    by_cols.columns = by_cols.columns.droplevel()
    by_cols['Died'] = by_cols['Total'] - by_cols['Survived']
    by_cols['Survive Rate'] = 100 * by_cols['Survived'] / by_cols['Total']
    return by_cols.sort_values('Total', ascending=False)

aggregate(new_train, ['Pclass', 'Male'])
code
121154019/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
sns.histplot(new_train['Age'], bins=20)
code
121154019/cell_35
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import xgboost as xgb

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

new_train = wrangle(train)
sns.set(style='whitegrid')
sns.despine(left=True)
new_train['AgeGroup'] = pd.cut(new_train['Age'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80])
age_survival = new_train.groupby(['AgeGroup'])['Survived'].mean().reset_index()
new_train['FareGroup'] = pd.cut(new_train['Fare'], bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 600])
age_survival = new_train.groupby(['FareGroup'])['Survived'].mean().reset_index()
new_train = new_train.drop(['AgeGroup', 'FareGroup'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(new_train.drop('Survived', axis=1), new_train['Survived'], test_size=0.2, random_state=42)
cat_cols = ['Embarked', 'Pronoun', 'Prefix', 'Letter']
X_train_final, X_test_final, encoder = encode(X_train, X_test, cat_cols)
xgb_classifier = xgb.XGBClassifier()
xgb_classifier.fit(X_train_final, y_train)
y_pred = xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
param_grid = {'max_depth': [3, 4, 5], 'learning_rate': [0.1, 0.01], 'n_estimators': [100, 200], 'gamma': [0, 0.1], 'colsample_bytree': [0.6, 0.8]}
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
best_xgb_classifier = grid_search.best_estimator_
y_pred = best_xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
new_test = wrangle(test)
X_train_final, X_test_final, encoder = encode(new_train.drop('Survived', axis=1), new_test, cat_cols)
y_train = new_train['Survived']
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
final_xgb_classifier = grid_search.best_estimator_
importance = final_xgb_classifier.feature_importances_
features = X_train_final.columns
encodings = encoder.get_feature_names()
translated = []
for feature in features:
    if feature in range(len(encodings)):
        translated.append(encodings[feature])
    else:
        translated.append(feature)
df_importance = pd.DataFrame({'Feature': translated, 'Importance': importance}).sort_values('Importance', ascending=False)
y_pred = final_xgb_classifier.predict(X_test_final)
prediction = pd.DataFrame()
prediction['PassengerId'] = test['PassengerId']
prediction['Survived'] = y_pred
prediction.to_csv('prediction.csv', index=False)
prediction
code
121154019/cell_24
[ "image_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

# X_train_final is produced by encode() in an earlier cell not repeated in this extract
X_train_final
code
121154019/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)

def aggregate(df, cols):
    by_cols = df.groupby(cols).agg({'Survived': [('Total', 'count'), ('Survived', 'sum')]})
    by_cols.columns = by_cols.columns.droplevel()
    by_cols['Died'] = by_cols['Total'] - by_cols['Survived']
    by_cols['Survive Rate'] = 100 * by_cols['Survived'] / by_cols['Total']
    return by_cols.sort_values('Total', ascending=False)

aggregate(new_train, ['Embarked'])
code
121154019/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)
new_train.head()
code
121154019/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import xgboost as xgb

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def encode(X_train, X_test, cat_cols):
    encoder = OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore')
    encoder.fit(X_train[cat_cols])
    X_train_encoded = encoder.transform(X_train[cat_cols])
    X_train_final = pd.concat([X_train.drop(cat_cols, axis=1), pd.DataFrame(X_train_encoded, index=X_train.index)], axis=1)
    X_test_encoded = encoder.transform(X_test[cat_cols])
    X_test_final = pd.concat([X_test.drop(cat_cols, axis=1), pd.DataFrame(X_test_encoded, index=X_test.index)], axis=1)
    return (X_train_final, X_test_final, encoder)

# X_train_final, y_train, X_test_final, y_test come from earlier cells
# (wrangle + train_test_split + encode) that this extract does not repeat
xgb_classifier = xgb.XGBClassifier()
xgb_classifier.fit(X_train_final, y_train)
y_pred = xgb_classifier.predict(X_test_final)
accuracy = accuracy_score(y_test, y_pred)
param_grid = {'max_depth': [3, 4, 5], 'learning_rate': [0.1, 0.01], 'n_estimators': [100, 200], 'gamma': [0, 0.1], 'colsample_bytree': [0.6, 0.8]}
xgb_classifier = xgb.XGBClassifier()
grid_search = GridSearchCV(estimator=xgb_classifier, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train_final, y_train)
print('Best hyperparameters:', grid_search.best_params_)
code
121154019/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

def wrangle(df):
    df['Male'] = df['Sex'].map(lambda x: x == 'male')
    df['Pronoun'] = df['Name'].map(lambda x: x.split(', ')[1].split('.')[0])
    df['Prefix'] = df['Ticket'].map(lambda x: x.split(' ')[0].split('/')[0].replace('.', '') if ' ' in x else '')
    df['Letter'] = df['Cabin'].fillna('00').map(lambda x: x[0])
    df['Family'] = df['SibSp'] + df['Parch']
    df = df.drop(['PassengerId', 'Name', 'Ticket', 'Sex', 'Cabin'], axis=1)
    return df

new_train = wrangle(train)

def aggregate(df, cols):
    by_cols = df.groupby(cols).agg({'Survived': [('Total', 'count'), ('Survived', 'sum')]})
    by_cols.columns = by_cols.columns.droplevel()
    by_cols['Died'] = by_cols['Total'] - by_cols['Survived']
    by_cols['Survive Rate'] = 100 * by_cols['Survived'] / by_cols['Total']
    return by_cols.sort_values('Total', ascending=False)

aggregate(new_train, ['Pclass'])
code
121154019/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.head()
code
2045099/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)
ted.speaker_occupation.value_counts().head(10)
code
2045099/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.describe()
code
2045099/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')
code
2045099/cell_34
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_xlim(2200000)
axes.set_ylabel('comments')
axes.set_ylim(200)
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

labels = ted.num_speaker.unique()
sizes = ted.num_speaker.value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # equal aspect ratio ensures that pie is drawn as a circle
ax1.set_title('Number of presenters')
plt.show()

ted.num_speaker.unique()
ted.num_speaker.value_counts()
ted.columns
ted.ratings[0]
code
2045099/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_xlim(2200000)
axes.set_ylabel('comments')
axes.set_ylim(200)
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

labels = ted.num_speaker.unique()  # caution: unique() is in order of appearance while value_counts() sorts by frequency, so labels may not line up with sizes
sizes = ted.num_speaker.value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
ax1.set_title('Number of presenters')
plt.show()
code
2045099/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.head()
code
2045099/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)
ted.event.value_counts().tail(10)
code
2045099/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
2045099/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted[ted['speaker_occupation'].isnull()]
code
2045099/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)
ted.event.value_counts().head(10)
code
2045099/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_xlim(2200000)
axes.set_ylabel('comments')
axes.set_ylim(200)
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

labels = ted.num_speaker.unique()
sizes = ted.num_speaker.value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # equal aspect ratio ensures that pie is drawn as a circle
ax1.set_title('Number of presenters')
plt.show()

ted.num_speaker.unique()
ted.num_speaker.value_counts()
ted.columns
code
2045099/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)
code
2045099/cell_38
[ "text_plain_output_1.png" ]
import ast
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_xlim(2200000)
axes.set_ylabel('comments')
axes.set_ylim(200)
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

labels = ted.num_speaker.unique()
sizes = ted.num_speaker.value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # equal aspect ratio ensures that pie is drawn as a circle
ax1.set_title('Number of presenters')
plt.show()

ted.num_speaker.unique()
ted.num_speaker.value_counts()
ted.columns
ted.ratings[0]
ted.ratings = ted.ratings.apply(lambda x: ast.literal_eval(x))
ted.ratings[0]
code
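For context, each entry of ted.ratings is a stringified Python list of dicts, which is why ast.literal_eval (rather than json.loads, since the strings use single quotes) is the right parser; a standalone example with made-up values:

import ast

raw = "[{'id': 7, 'name': 'Funny', 'count': 19645}]"  # shape of one ratings entry; numbers invented
parsed = ast.literal_eval(raw)
print(parsed[0]['name'], parsed[0]['count'])  # Funny 19645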
2045099/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_xlim(2200000)
axes.set_ylabel('comments')
axes.set_ylim(200)
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

labels = ted.num_speaker.unique()
sizes = ted.num_speaker.value_counts()
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # equal aspect ratio ensures that pie is drawn as a circle
ax1.set_title('Number of presenters')
plt.show()

ted.num_speaker.unique()
ted.num_speaker.value_counts()
code
2045099/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
code
2045099/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
ted.sort_values('views', ascending=False).head(5)
ted.sort_values('views', ascending=True).head(5)

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_ylabel('comments')
axes.plot(ted['views'], ted['comments'], ls='', marker='.')

fig = plt.figure()
axes = fig.add_axes([0, 0, 1, 1])
axes.set_xlabel('views')
axes.set_xlim(2200000)
axes.set_ylabel('comments')
axes.set_ylim(200)
axes.plot(ted['views'], ted['comments'], ls='', marker='.')
code
2045099/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.corr()
code
2045099/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

ted = pd.read_csv('../input/ted_main.csv')
ted.info()
code
17115909/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies.loc[animated_movies['popularity'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['budget'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['profit'].idxmax(), 'original_title']
top_10 = animated_movies.nlargest(10, 'profit')
top_10.index = top_10.original_title
plt.figure(figsize=(12, 6))
plt.title('Profit Vs Year')
plt.xlabel('Year')
plt.ylabel('Profit in Billions')
plt.scatter(animated_movies.release_year, animated_movies.profit, color='red')
plt.show()
code
17115909/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies.loc[animated_movies['popularity'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['budget'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['profit'].idxmax(), 'original_title']
top_10 = animated_movies.nlargest(10, 'profit')
top_10.index = top_10.original_title
top_10[['original_title', 'profit']].plot.bar(figsize=(12, 6))
code
17115909/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies.head(5)
code
17115909/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies.loc[animated_movies['popularity'].idxmax(), 'original_title']
code
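The .loc[df[col].idxmax(), other_col] idiom above fetches a different column from the row holding the maximum value; a toy check (hypothetical data):

import pandas as pd

toy = pd.DataFrame({'original_title': ['A', 'B'], 'popularity': [3.1, 9.9]})
print(toy.loc[toy['popularity'].idxmax(), 'original_title'])  # B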
17115909/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
17115909/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies['production_companies'].value_counts().head(5).plot.bar(figsize=(10, 5))
code
17115909/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
print('Percentage of animated movies: {}%'.format(round(animated_movies_count / movie_count * 100, 2)))
code
17115909/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies.loc[animated_movies['popularity'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['budget'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['profit'].idxmax(), 'original_title']
code
17115909/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies.loc[animated_movies['popularity'].idxmax(), 'original_title']
animated_movies.loc[animated_movies['budget'].idxmax(), 'original_title']
code
17115909/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

imdb_movies = pd.read_csv('../input/imdb-movies.csv')
movie_count = imdb_movies.shape[0]
columns_to_keep = ['popularity', 'budget', 'revenue', 'original_title', 'director', 'runtime', 'genres', 'production_companies', 'release_year']
imdb_movies = imdb_movies[columns_to_keep]
animated_movies = imdb_movies[imdb_movies['genres'].str.contains('Animation') == True]
animated_movies_count = animated_movies.shape[0]
animated_movies = animated_movies.assign(profit=pd.Series(animated_movies.revenue - animated_movies.budget).values)
animated_movies['release_year'].value_counts(sort=False).plot.bar(figsize=(20, 5))
code
1004380/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1)
labels = train_df['Survived']
n_samples = len(train_df)
n_features = len(features.columns)
n_survived = labels.value_counts()[1]
n_died = labels.value_counts()[0]
print('Number of training samples: {}'.format(n_samples))
print('Number of features: {}'.format(n_features))
print('Number of survivors: {}'.format(n_survived))
print('Number of deaths: {}'.format(n_died))
code
1004380/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1004380/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
features = train_df.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis=1)
labels = train_df['Survived']
n_samples = len(train_df)
n_features = len(features.columns)
n_survived = labels.value_counts()[1]
n_died = labels.value_counts()[0]
features.head(n=20)
code
105214007/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
data.company_size.unique()
data.company_size.replace({'<10': '1-9', '10/49': '10-49', '10000+': '10000-10500'}, inplace=True)
data.company_size.unique()
code
105214007/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
plt.figure(figsize=(10, 10))
sns.heatmap(data.isna())
code
105214007/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
data.company_size.unique()
data.company_size.replace({'<10': '1-9', '10/49': '10-49', '10000+': '10000-10500'}, inplace=True)
data.company_size.unique()
data.last_new_job.replace({'>4': '5', 'never': '0'}, inplace=True)
data.last_new_job.unique()
data.dtypes
com_siz = []
for i in data.company_size:
    x, y = (i.split('-')[0], i.split('-')[1])
    m = (int(x) + int(y)) / 2
    com_siz.append(m)
data.company_size = com_siz
data.company_size.unique()
data.dtypes
code
105214007/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.head()
code
105214007/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
data.company_size.unique()
code
105214007/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105214007/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
code
105214007/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
data.company_size.unique()
data.company_size.replace({'<10': '1-9', '10/49': '10-49', '10000+': '10000-10500'}, inplace=True)
data.company_size.unique()
data.last_new_job.replace({'>4': '5', 'never': '0'}, inplace=True)
data.last_new_job.unique()
data.dtypes
com_siz = []
for i in data.company_size:
    x, y = (i.split('-')[0], i.split('-')[1])
    m = (int(x) + int(y)) / 2
    com_siz.append(m)
data.company_size = com_siz
data.company_size.unique()
code
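As a sanity check on the range-to-midpoint conversion in the cell above, the same transformation as a standalone function:

def midpoint(rng):
    lo, hi = rng.split('-')
    return (int(lo) + int(hi)) / 2

print(midpoint('1-9'))          # 5.0
print(midpoint('10000-10500'))  # 10250.0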
105214007/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
code
105214007/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
data.company_size.unique()
data.company_size.replace({'<10': '1-9', '10/49': '10-49', '10000+': '10000-10500'}, inplace=True)
data.company_size.unique()
data.last_new_job.replace({'>4': '5', 'never': '0'}, inplace=True)
data.last_new_job.unique()
code
105214007/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
data.company_size.unique()
data.company_size.replace({'<10': '1-9', '10/49': '10-49', '10000+': '10000-10500'}, inplace=True)
data.company_size.unique()
data.last_new_job.replace({'>4': '5', 'never': '0'}, inplace=True)
data.last_new_job.unique()
data.dtypes
code
105214007/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
code
105214007/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
data.fillna({'gender': 'Other', 'enrolled_university': 'no_enrollment', 'education_level': 'Other', 'major_discipline': 'Other', 'experience': '0', 'company_size': '<10', 'company_type': 'Other', 'last_new_job': 'never'}, inplace=True)
data.isna().sum()
data.experience.unique()
data.experience.replace({'>20': '22', '<1': '0'}, inplace=True)
data.experience.unique()
code
105214007/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
data.isna().sum()
data.gender.unique()
data.enrolled_university.unique()
data.education_level.unique()
data.major_discipline.unique()
data.experience.unique()
data.company_size.unique()
data.company_type.unique()
data.last_new_job.unique()
code
122252043/cell_42
[ "text_plain_output_1.png" ]
x = 12
y = 8
y >> 2
code
122252043/cell_63
[ "text_plain_output_1.png" ]
int('0o65416', base=8)
code
122252043/cell_81
[ "text_plain_output_1.png" ]
a = 12670
b = 12.344
print(f"a={{{a:5d}}},b='{b:>4.0f}'")
code
122252043/cell_13
[ "text_plain_output_1.png" ]
375
code
122252043/cell_9
[ "text_plain_output_1.png" ]
74155
code
122252043/cell_4
[ "text_plain_output_1.png" ]
type('2+3')
code
122252043/cell_83
[ "text_plain_output_1.png" ]
num = input('A number:')
code
122252043/cell_79
[ "text_plain_output_1.png" ]
a = 12670
b = 12.344
print(f'a={a:>+7,d},b={b:>06.1f}')
code
122252043/cell_20
[ "text_plain_output_1.png" ]
num = 12
num = 12 + 7.2
type(num)
code
122252043/cell_55
[ "text_plain_output_1.png" ]
eval("int('2020')")
code
122252043/cell_6
[ "text_plain_output_1.png" ]
type(18)
code
122252043/cell_74
[ "text_plain_output_1.png" ]
print("'''\\n represents a new line character'''")
code
122252043/cell_40
[ "text_plain_output_1.png" ]
x = 5
x += 2
x = 12
y = 8
x << 2
code
122252043/cell_29
[ "text_plain_output_1.png" ]
2 < 8 or (7 <= 8 and 7 > 2)
code
122252043/cell_39
[ "text_plain_output_1.png" ]
x = 5
x += 2
x = 12
y = 8
x >> 2
code
122252043/cell_26
[ "text_plain_output_1.png" ]
'kitty' < 'kitten'
code
122252043/cell_48
[ "text_plain_output_1.png" ]
(not 'piggy') + True
code
122252043/cell_73
[ "text_plain_output_1.png" ]
print('A back slash \\ sign.')
code
122252043/cell_41
[ "text_plain_output_1.png" ]
x = 12
y = 8
~y
code
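The shift and complement cells in this notebook all reduce to simple two's-complement identities; a compact check of the values they produce:

x, y = 12, 8
assert x << 2 == 48  # left shift by k multiplies by 2**k
assert x >> 2 == 3   # right shift by k floor-divides by 2**k
assert y >> 2 == 2
assert ~y == -9      # ~n == -n - 1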
122252043/cell_54
[ "text_plain_output_1.png" ]
eval('6**2+3*(7-1)')
code
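Worked results for the two eval() cells, with the usual caveat that eval executes arbitrary expressions and is only safe on trusted strings:

assert eval("int('2020')") == 2020
assert eval('6**2+3*(7-1)') == 54  # 36 + 3*6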
122252043/cell_72
[ "text_plain_output_1.png" ]
print('"You may say I\'m a dreamer"')
code
122252043/cell_67
[ "text_plain_output_1.png" ]
oct(int('0b1001001', base=2))
code
122252043/cell_11
[ "text_plain_output_1.png" ]
3737
code
122252043/cell_60
[ "text_plain_output_1.png" ]
hex(1024)
code
122252043/cell_86
[ "text_plain_output_1.png" ]
nofd = input('A num of d:')
code
122252043/cell_64
[ "text_plain_output_1.png" ]
hex(int('0o65416', base=8))
code
122252043/cell_32
[ "text_plain_output_1.png" ]
x = 5
x += 2
x
code
122252043/cell_68
[ "text_plain_output_1.png" ]
hex(int('0b1001001', base=2))
code
122252043/cell_62
[ "text_plain_output_1.png" ]
bin(int('0o65416', base=8))
code
122252043/cell_59
[ "text_plain_output_1.png" ]
oct(1024)
code
122252043/cell_58
[ "text_plain_output_1.png" ]
bin(1024)
code
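A compact round-trip check of the base conversions exercised in the cells above:

n = int('0o65416', base=8)  # 27406
assert bin(n) == '0b110101100001110'
assert hex(n) == '0x6b0e'
assert (bin(1024), oct(1024), hex(1024)) == ('0b10000000000', '0o2000', '0x400')
assert int('0b1001001', base=2) == 73 and oct(73) == '0o111' and hex(73) == '0x49'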
122252043/cell_28
[ "text_plain_output_1.png" ]
False and 'kitty' or True
code
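The comparison and boolean cells above hinge on lexicographic string order, short-circuiting, and bool-to-int coercion; their expected values:

assert ('kitty' < 'kitten') is False  # first difference: 'y' > 'e'
assert (2 < 8 or (7 <= 8 and 7 > 2)) is True
assert (not 'piggy') + True == 1      # not 'piggy' -> False; False + True -> 1
assert (False and 'kitty' or True) is True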
122252043/cell_78
[ "text_plain_output_1.png" ]
a = 12670
b = 12.344
print(f'a={a:>+7d},b={b:>06.2f}')
code
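The three formatting cells combine fill/align, sign, zero-padding, digit grouping, and precision; a worked expansion with the outputs shown as comments:

a, b = 12670, 12.344
print(f'a={a:>+7d},b={b:>06.2f}')     # a= +12670,b=012.34
print(f'a={a:>+7,d},b={b:>06.1f}')    # a=+12,670,b=0012.3
print(f"a={{{a:5d}}},b='{b:>4.0f}'")  # a={12670},b='  12'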