path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
129023624/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
test_df
code
129023624/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129023624/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
code
129023624/cell_45
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, Embedding, Convolution1D, MaxPooling1D, AveragePooling1D, Input, Dense, Add, TimeDistributed, Bidirectional, SpatialDropout1D, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.models import Sequential, Model, load_model
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel

MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
# Reuse the pretrained XLM-R input embeddings as a frozen embedding matrix
embedding_matrix = xlm_model.get_input_embeddings().weight

def create_rnn_model(input_shape):
    model_xlm = Sequential()
    model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()]))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GlobalMaxPool1D())
    model_xlm.add(Dense(30, activation='relu'))
    model_xlm.add(Dropout(0.4))
    model_xlm.add(Dense(1, activation='sigmoid'))
    return model_xlm

# X_train_xlm, y_train_xlm and X_test_xlm are built in cells not shown here
model = create_rnn_model(X_train_xlm.shape[1:])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
epochs = 10
history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs)
y_pred = (model.predict(X_test_xlm) > 0.5).astype(int)
y_pred
code
129023624/cell_28
[ "text_plain_output_1.png" ]
!pip install sentencepiece
code
129023624/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
code
129023624/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape
sns.set(style='darkgrid')
ax = sns.countplot(x=train_df['target'], data=train_df)
code
129023624/cell_47
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, \ from keras.layers import LSTM, GRU, SimpleRNN from keras.models import Sequential,Model,load_model from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict, KFold, GridSearchCV from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel import contractions import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') train_df.shape nan_count = train_df.isna().sum() nan_count train_df.drop(['location'], axis=1) train_df.duplicated(['text', 'target']).sum() train_df = train_df.drop_duplicates(['text', 'target']) train_df.shape def remove_mentions(data_df): mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df) return mentions_removed def remove_hashtags(data_df): hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df) return hashtags_removed def remove_urls(data_df): hashtags_removed = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[\n]|[$-_@.&+\\]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df) return hashtags_removed def convert_contractions(data_df): contractions_converted = contractions.fix(data_df) return contractions_converted train_df['text'] = train_df['text'].astype(str) train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist() train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist() train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist() train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower()) train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist() MODEL_TYPE = 'xlm-roberta-large' xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE) xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE) embedding_matrix = xlm_model.get_input_embeddings().weight tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True) token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']] avg_length = sum(token_sentence_length) / train_df.shape[0] MAX_LEN = max(token_sentence_length) import matplotlib.pyplot as plt plt.xticks(fontsize=14) plt.yticks(fontsize=14) max_len = round(max(token_sentence_length)) max_len tokenized_feature = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf') padded_inputs = tokenized_feature['input_ids'] train_padded_docs = np.array(padded_inputs) labels = np.array(train_df['target']) def create_rnn_model(input_shape): model_xlm = Sequential() model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()])) model_xlm.add(GRU(512, return_sequences=True)) model_xlm.add(GRU(512, return_sequences=True)) model_xlm.add(GlobalMaxPool1D()) model_xlm.add(Dense(30, activation='relu')) model_xlm.add(Dropout(0.4)) model_xlm.add(Dense(1, activation='sigmoid')) return model_xlm kfold = KFold(n_splits=10, shuffle=True, random_state=42) model = create_rnn_model(X_train_xlm.shape[1:]) 
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) batch_size = 64 epochs = 10 history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs) y_pred = (model.predict(X_test_xlm) > 0.5).astype(int) for train_index, test_index in kfold.split(train_padded_docs): X_train, X_test = (train_padded_docs[train_index], train_padded_docs[test_index]) y_train, y_test = (labels[train_index], labels[test_index]) model = create_rnn_model(X_train.shape[1:]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, epochs=10, batch_size=64, verbose=0) loss, accuracy = model.evaluate(X_test, y_test, verbose=0) print('Fold accuracy:', accuracy)
code
129023624/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, KFold, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix, precision_recall_fscore_support
import re
import tensorflow as tf
import keras
from keras import backend as K
from keras.models import Sequential, Model, load_model
from keras.preprocessing.text import Tokenizer
from keras.layers import Dropout, Activation, Flatten, Embedding, Convolution1D, MaxPooling1D, AveragePooling1D, Input, Dense, Add, TimeDistributed, Bidirectional, SpatialDropout1D, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.regularizers import l2, l1_l2
from keras.constraints import maxnorm
from keras import callbacks
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import KFold
import contractions
code
129023624/cell_35
[ "text_plain_output_1.png" ]
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel import contractions import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') train_df.shape nan_count = train_df.isna().sum() nan_count train_df.drop(['location'], axis=1) train_df.duplicated(['text', 'target']).sum() train_df = train_df.drop_duplicates(['text', 'target']) train_df.shape def remove_mentions(data_df): mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df) return mentions_removed def remove_hashtags(data_df): hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df) return hashtags_removed def remove_urls(data_df): hashtags_removed = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[\n]|[$-_@.&+\\]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df) return hashtags_removed def convert_contractions(data_df): contractions_converted = contractions.fix(data_df) return contractions_converted train_df['text'] = train_df['text'].astype(str) train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist() train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist() train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist() train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower()) train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist() MODEL_TYPE = 'xlm-roberta-large' xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE) xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE) tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True) token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']] avg_length = sum(token_sentence_length) / train_df.shape[0] MAX_LEN = max(token_sentence_length) import matplotlib.pyplot as plt plt.xticks(fontsize=14) plt.yticks(fontsize=14) avg_length = round(avg_length) avg_length
code
129023624/cell_43
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, Embedding, Convolution1D, MaxPooling1D, AveragePooling1D, Input, Dense, Add, TimeDistributed, Bidirectional, SpatialDropout1D, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.models import Sequential, Model, load_model
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel

MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
embedding_matrix = xlm_model.get_input_embeddings().weight

def create_rnn_model(input_shape):
    model_xlm = Sequential()
    model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()]))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GlobalMaxPool1D())
    model_xlm.add(Dense(30, activation='relu'))
    model_xlm.add(Dropout(0.4))
    model_xlm.add(Dense(1, activation='sigmoid'))
    return model_xlm

model = create_rnn_model(X_train_xlm.shape[1:])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
epochs = 10
history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs)
code
129023624/cell_31
[ "text_plain_output_1.png" ]
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel

MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
code
129023624/cell_46
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, Embedding, Convolution1D, MaxPooling1D, AveragePooling1D, Input, Dense, Add, TimeDistributed, Bidirectional, SpatialDropout1D, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.models import Sequential, Model, load_model
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix, precision_recall_fscore_support
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel

MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
embedding_matrix = xlm_model.get_input_embeddings().weight

def create_rnn_model(input_shape):
    model_xlm = Sequential()
    model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()]))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GlobalMaxPool1D())
    model_xlm.add(Dense(30, activation='relu'))
    model_xlm.add(Dropout(0.4))
    model_xlm.add(Dense(1, activation='sigmoid'))
    return model_xlm

model = create_rnn_model(X_train_xlm.shape[1:])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
epochs = 10
history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs)
y_pred = (model.predict(X_test_xlm) > 0.5).astype(int)
print(classification_report(y_test_xlm, y_pred))
code
129023624/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape
train_df['lower_cased']
code
129023624/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape
code
129023624/cell_53
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, \ from keras.layers import LSTM, GRU, SimpleRNN from keras.models import Sequential,Model,load_model from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split,cross_val_score, cross_val_predict, KFold, GridSearchCV from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel import contractions import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') train_df.shape nan_count = train_df.isna().sum() nan_count train_df.drop(['location'], axis=1) train_df.duplicated(['text', 'target']).sum() train_df = train_df.drop_duplicates(['text', 'target']) train_df.shape def remove_mentions(data_df): mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df) return mentions_removed def remove_hashtags(data_df): hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df) return hashtags_removed def remove_urls(data_df): hashtags_removed = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[\n]|[$-_@.&+\\]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df) return hashtags_removed def convert_contractions(data_df): contractions_converted = contractions.fix(data_df) return contractions_converted train_df['text'] = train_df['text'].astype(str) train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist() train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist() train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist() train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower()) train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist() MODEL_TYPE = 'xlm-roberta-large' xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE) xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE) embedding_matrix = xlm_model.get_input_embeddings().weight tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True) token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']] avg_length = sum(token_sentence_length) / train_df.shape[0] MAX_LEN = max(token_sentence_length) import matplotlib.pyplot as plt plt.xticks(fontsize=14) plt.yticks(fontsize=14) max_len = round(max(token_sentence_length)) max_len tokenized_feature = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf') padded_inputs = tokenized_feature['input_ids'] train_padded_docs = np.array(padded_inputs) labels = np.array(train_df['target']) def create_rnn_model(input_shape): model_xlm = Sequential() model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()])) model_xlm.add(GRU(512, return_sequences=True)) model_xlm.add(GRU(512, return_sequences=True)) model_xlm.add(GlobalMaxPool1D()) model_xlm.add(Dense(30, activation='relu')) model_xlm.add(Dropout(0.4)) model_xlm.add(Dense(1, activation='sigmoid')) return model_xlm kfold = KFold(n_splits=10, shuffle=True, random_state=42) model = create_rnn_model(X_train_xlm.shape[1:]) 
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) batch_size = 64 epochs = 10 history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs) y_pred = (model.predict(X_test_xlm) > 0.5).astype(int) for train_index, test_index in kfold.split(train_padded_docs): X_train, X_test = (train_padded_docs[train_index], train_padded_docs[test_index]) y_train, y_test = (labels[train_index], labels[test_index]) model = create_rnn_model(X_train.shape[1:]) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, epochs=10, batch_size=64, verbose=0) loss, accuracy = model.evaluate(X_test, y_test, verbose=0) tokenized_feature_test_data = xlm_tokenizer.batch_encode_plus(test_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf') padded_inputs_test = tokenized_feature_test_data['input_ids'] predictions = (model.predict(padded_inputs_test) > 0.5).astype(int)
code
129023624/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
code
129023624/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
code
129023624/cell_36
[ "application_vnd.jupyter.stderr_output_1.png" ]
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel import contractions import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') train_df.shape nan_count = train_df.isna().sum() nan_count train_df.drop(['location'], axis=1) train_df.duplicated(['text', 'target']).sum() train_df = train_df.drop_duplicates(['text', 'target']) train_df.shape def remove_mentions(data_df): mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df) return mentions_removed def remove_hashtags(data_df): hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df) return hashtags_removed def remove_urls(data_df): hashtags_removed = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[\n]|[$-_@.&+\\]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df) return hashtags_removed def convert_contractions(data_df): contractions_converted = contractions.fix(data_df) return contractions_converted train_df['text'] = train_df['text'].astype(str) train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist() train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist() train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist() train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower()) train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist() MODEL_TYPE = 'xlm-roberta-large' xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE) xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE) tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True) token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']] avg_length = sum(token_sentence_length) / train_df.shape[0] MAX_LEN = max(token_sentence_length) import matplotlib.pyplot as plt plt.xticks(fontsize=14) plt.yticks(fontsize=14) max_len = round(max(token_sentence_length)) max_len
code
73099194/cell_9
[ "text_html_output_1.png" ]
dicom_test = Dicom()
dicom_test.exec('test')
code
73099194/cell_11
[ "text_plain_output_1.png" ]
dicom_test = Dicom()
dicom_test.exec('test')
dicom_test.df.head()
code
73099194/cell_8
[ "text_html_output_1.png" ]
dicom_train = Dicom()
dicom_train.exec('train')
code
73099194/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
code
73099194/cell_10
[ "text_plain_output_1.png" ]
dicom_train = Dicom()
dicom_train.exec('train')
dicom_train.df.head()
code
73099194/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pydicom import dcmread

data_dir = '/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification'
fpath = data_dir + '/train/00000/FLAIR/Image-1.dcm'
ds = dcmread(fpath)
print(ds)
code
50225023/cell_21
[ "text_plain_output_1.png" ]
from collections import defaultdict from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show() fig,(ax1,ax2) = plt.subplots(1,2,figsize=(10,5)) # No Disaster Tweets train_len = train[train['target']==0]['text'].str.len() ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') fig.suptitle('Characters in tweets') # Disaster Tweets train_len = train[train['target']==1]['text'].str.len() ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') plt.show() fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5)) train_len = train[train['target']==0]['text'].str.split().map(lambda x: len(x)) ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') train_len = train[train['target']==1]['text'].str.split().map(lambda x: len(x)) ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') fig.suptitle('Words in a tweet') plt.show() def create_corpus(target): corpus = [] for x in train[train['target'] == target]['text'].str.split(): for i in x: corpus.append(i) return corpus corpus0 = create_corpus(0) corpus1 = create_corpus(1) len(corpus0) dic = defaultdict(int) for word in corpus0: if word in stop: dic[word] += 1 top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10] x, y = zip(*top) dic = defaultdict(int) for word in corpus1: if word in stop: dic[word] += 1 top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10] x, y = zip(*top) plt.bar(x, y)
code
50225023/cell_13
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5)) train_len = train[train['target'] == 0]['text'].str.len() ax1.hist(train_len, color='green') ax1.set_title('Not disaster tweets') fig.suptitle('Characters in tweets') train_len = train[train['target'] == 1]['text'].str.len() ax2.hist(train_len, color='red') ax2.set_title('Disaster tweets') plt.show()
code
50225023/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import defaultdict from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import string import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show() fig,(ax1,ax2) = plt.subplots(1,2,figsize=(10,5)) # No Disaster Tweets train_len = train[train['target']==0]['text'].str.len() ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') fig.suptitle('Characters in tweets') # Disaster Tweets train_len = train[train['target']==1]['text'].str.len() ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') plt.show() fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5)) train_len = train[train['target']==0]['text'].str.split().map(lambda x: len(x)) ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') train_len = train[train['target']==1]['text'].str.split().map(lambda x: len(x)) ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') fig.suptitle('Words in a tweet') plt.show() def create_corpus(target): corpus = [] for x in train[train['target'] == target]['text'].str.split(): for i in x: corpus.append(i) return corpus corpus0 = create_corpus(0) corpus1 = create_corpus(1) len(corpus0) dic = defaultdict(int) for word in corpus0: if word in stop: dic[word] += 1 top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10] x, y = zip(*top) dic = defaultdict(int) for word in corpus1: if word in stop: dic[word] += 1 top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10] x, y = zip(*top) plt.figure(figsize=(10, 5)) dic = defaultdict(int) special = string.punctuation for i in corpus1: if i in special: dic[i] += 1 x, y = zip(*dic.items()) plt.bar(x, y)
code
50225023/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/nlp-getting-started/train.csv')
test = pd.read_csv('../input/nlp-getting-started/test.csv')
train.head()
code
50225023/cell_19
[ "image_output_1.png" ]
from collections import defaultdict from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show() fig,(ax1,ax2) = plt.subplots(1,2,figsize=(10,5)) # No Disaster Tweets train_len = train[train['target']==0]['text'].str.len() ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') fig.suptitle('Characters in tweets') # Disaster Tweets train_len = train[train['target']==1]['text'].str.len() ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') plt.show() fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5)) train_len = train[train['target']==0]['text'].str.split().map(lambda x: len(x)) ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') train_len = train[train['target']==1]['text'].str.split().map(lambda x: len(x)) ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') fig.suptitle('Words in a tweet') plt.show() def create_corpus(target): corpus = [] for x in train[train['target'] == target]['text'].str.split(): for i in x: corpus.append(i) return corpus corpus0 = create_corpus(0) corpus1 = create_corpus(1) len(corpus0) dic = defaultdict(int) for word in corpus0: if word in stop: dic[word] += 1 top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10] x, y = zip(*top) plt.bar(x, y, color='green')
code
50225023/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/nlp-getting-started/train.csv')
test = pd.read_csv('../input/nlp-getting-started/test.csv')
print('There are {} rows and {} columns in train'.format(train.shape[0], train.shape[1]))
print('There are {} rows and {} columns in test'.format(test.shape[0], test.shape[1]))
code
50225023/cell_16
[ "image_output_1.png" ]
from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show() fig,(ax1,ax2) = plt.subplots(1,2,figsize=(10,5)) # No Disaster Tweets train_len = train[train['target']==0]['text'].str.len() ax1.hist(train_len,color='green') ax1.set_title('Not disaster tweets') fig.suptitle('Characters in tweets') # Disaster Tweets train_len = train[train['target']==1]['text'].str.len() ax2.hist(train_len,color='red') ax2.set_title('Disaster tweets') plt.show() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5)) train_len = train[train['target'] == 0]['text'].str.split().map(lambda x: len(x)) ax1.hist(train_len, color='green') ax1.set_title('Not disaster tweets') train_len = train[train['target'] == 1]['text'].str.split().map(lambda x: len(x)) ax2.hist(train_len, color='red') ax2.set_title('Disaster tweets') fig.suptitle('Words in a tweet') plt.show()
code
50225023/cell_17
[ "image_output_1.png" ]
from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show() def create_corpus(target): corpus = [] for x in train[train['target'] == target]['text'].str.split(): for i in x: corpus.append(i) return corpus corpus0 = create_corpus(0) corpus1 = create_corpus(1) len(corpus0)
code
50225023/cell_10
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords, wordnet import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('ggplot') from collections import Counter from collections import defaultdict from sklearn.feature_extraction.text import CountVectorizer from nltk.util import ngrams from nltk.tokenize import word_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer stop = set(stopwords.words('english')) from tqdm.notebook import tqdm import os import re import time import string import random import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F from torchtext import data, datasets from torchtext.vocab import Vectors, GloVe train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv') fig, axes = plt.subplots(ncols=2, figsize=(17, 4), dpi=100) train.groupby('target').count()['id'].plot(kind='pie', ax=axes[0], labels=['Not Disaster (57%)', 'Disaster (43%)']) sns.countplot(x=train['target'], hue=train['target'], ax=axes[1]) axes[0].set_ylabel('') axes[1].set_ylabel('') axes[1].set_xticklabels(['Not Disaster (4342)', 'Disaster (3271)']) axes[0].tick_params(axis='x', labelsize=15) axes[0].tick_params(axis='y', labelsize=15) axes[1].tick_params(axis='x', labelsize=15) axes[1].tick_params(axis='y', labelsize=15) axes[0].set_title('Target Distribution in Training Set', fontsize=13) axes[1].set_title('Target Count in Training Set', fontsize=13) plt.show()
code
90126740/cell_9
[ "image_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
dataset.describe(include='all')
code
90126740/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
dataset.isnull().sum()
dataset.nunique()
code
90126740/cell_19
[ "image_output_1.png" ]
from sklearn.cluster import AgglomerativeClustering
import matplotlib.pyplot as plt
import pandas as pd
import scipy.cluster.hierarchy as sch

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
dataset.isnull().sum()
dataset.nunique()
X = dataset.iloc[:, 1:3].values.round(2)

den = sch.dendrogram(sch.linkage(X, method='ward'))

hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(X)

plt.figure(figsize=(10, 7))
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s=100, c='blue', label='Cluster 2')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s=100, c='green', label='Cluster 3')
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s=100, c='cyan', label='Cluster 4')
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s=100, c='magenta', label='Cluster 5')
plt.title('Clusters of students')
plt.xlabel('Number of courses')
plt.ylabel('Time spent to study')
plt.legend()
plt.show()
code
90126740/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
code
90126740/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import scipy.cluster.hierarchy as sch

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
dataset.isnull().sum()
dataset.nunique()
X = dataset.iloc[:, 1:3].values.round(2)

den = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Number of courses')
plt.ylabel('Time spent to study')
plt.show()
code
90126740/cell_17
[ "text_plain_output_1.png" ]
from sklearn.cluster import AgglomerativeClustering
import pandas as pd

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
dataset.isnull().sum()
dataset.nunique()
X = dataset.iloc[:, 1:3].values.round(2)

hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(X)
print('setup complete')
code
90126740/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/student-marks-dataset/Student_Marks.csv')
dataset
dataset.isnull().sum()
code
105204136/cell_13
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, f1_score, make_scorer from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns rs = 42 mi = 10000 sns.set_style('whitegrid') models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)] train_path = '../input/cs-3110-mini-project/train.csv' test_path = '../input/cs-3110-mini-project/test.csv' def plt_distribution(dataset): data = dataset.copy() fig, axes = plt.subplots(5, 3) fig.set_figwidth(16) fig.set_figheight(20) sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0]) sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1]) sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2]) sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0]) sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1]) sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2]) sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0]) sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1]) sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2]) sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0]) sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1]) sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2]) sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0]) sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1]) sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2]) plt.show() def make_mi_scores(X, y): discrete_features = X.dtypes == int mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): plt.figure(dpi=100, figsize=(8, 10)) scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") plt.show() def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score_f1 = cross_val_score(model, X, y, scoring='f1') score = cross_val_score(model, X, y) new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False) def classification_report_with_accuracy_score(y_true, y_pred): print(classification_report(y_true, y_pred)) return 
f1_score(y_true, y_pred) def encode(dataframe, is_train=True): data = dataframe.copy() encoded_data = pd.get_dummies(data, columns=['location_code']) if is_train: encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0}) return encoded_data def add_features(dataframe): global lr_day global lr_eve global lr_nyt data = dataframe.copy() data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes'] try: data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) except NameError: lr_day = LinearRegression() lr_eve = LinearRegression() lr_nyt = LinearRegression() lr_day.fit(data[['total_day_min']], data['total_day_charge']) lr_eve.fit(data[['total_eve_min']], data['total_eve_charge']) lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge']) data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge']) data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge']) data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge']) return data train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) d_droped_train = train_df.drop_duplicates(train_df.columns.drop(['customer_id'])) d_droped_train = d_droped_train.drop(columns=['Unnamed: 20']) cols = d_droped_train.select_dtypes([np.number]).columns d_droped_train[cols] = d_droped_train[cols].abs() d_droped_train['account_length'].fillna(d_droped_train.account_length.median(), inplace=True) d_droped_train['intertiol_plan'].fillna('no', inplace=True) d_droped_train['voice_mail_plan'].fillna('no', inplace=True) d_droped_train.loc[d_droped_train['voice_mail_plan'] == 'no', 'number_vm_messages'] = 0 d_droped_train.loc[(d_droped_train['voice_mail_plan'] == 'yes') & d_droped_train['number_vm_messages'].isnull(), 'number_vm_messages'] = d_droped_train[d_droped_train.voice_mail_plan == 'yes'].number_vm_messages.median() d_droped_train.loc[d_droped_train['total_day_min'] > 500, 'total_day_min'] = np.nan d_droped_train['total_day_min'] = d_droped_train.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_day_calls'] > 350, 'total_day_calls'] = np.nan d_droped_train['total_day_calls'] = d_droped_train.sort_values(['total_day_min']).total_day_calls.ffill().sort_index() d_droped_train['total_day_charge'] = d_droped_train.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_eve_min'] > 500, 'total_eve_min'] = np.nan d_droped_train['total_eve_min'] = d_droped_train.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_eve_calls'] = d_droped_train.sort_values(['total_eve_min']).total_eve_calls.ffill().sort_index() 
d_droped_train['total_eve_charge'] = d_droped_train.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_night_minutes'] > 500, 'total_night_minutes'] = np.nan d_droped_train['total_night_minutes'] = d_droped_train.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_night_calls'] = d_droped_train.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index() d_droped_train.loc[d_droped_train['total_night_charge'] > 150, 'total_night_charge'] = np.nan d_droped_train['total_night_charge'] = d_droped_train.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_intl_minutes'] = d_droped_train.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[(d_droped_train['total_intl_minutes'] > 0) & (d_droped_train['total_intl_charge'] > 0) & (d_droped_train['total_intl_calls'] < 1), 'total_intl_calls'] = np.nan d_droped_train['total_intl_calls'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index() d_droped_train['total_intl_charge'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['customer_service_calls'].fillna(1, inplace=True) odm_handled_train = d_droped_train.dropna(subset=['Churn']) test_df = test_df.drop(columns=['Unnamed: 19', 'Unnamed: 20']) cols = test_df.select_dtypes([np.number]).columns test_df[cols] = test_df[cols].abs() test_df['location_code'] = test_df['location_code'].ffill() test_df['intertiol_plan'].fillna('no', inplace=True) test_df['voice_mail_plan'].fillna('no', inplace=True) test_df['number_vm_messages'].fillna(0, inplace=True) test_df['total_day_min'] = test_df.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_day_calls'] = test_df.sort_values(['total_day_min']).total_day_calls.ffill().sort_index() test_df['total_day_charge'] = test_df.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_eve_min'] = test_df.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_eve_charge'] = test_df.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_night_minutes'] = test_df.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_night_calls'] = test_df.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index() test_df['total_night_charge'] = test_df.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_intl_minutes'] = test_df.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df.loc[(test_df['total_intl_minutes'] > 0) & (test_df['total_intl_charge'] > 0) & (test_df['total_intl_calls'] < 1), 
'total_intl_calls'] = np.nan test_df['total_intl_calls'] = test_df.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index() test_df['customer_service_calls'].fillna(1, inplace=True) train = odm_handled_train.copy() test = test_df.copy() train.info()
code
105204136/cell_9
[ "image_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, f1_score, make_scorer
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
rs = 42
mi = 10000
sns.set_style('whitegrid')
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)]
train_path = '../input/cs-3110-mini-project/train.csv'
test_path = '../input/cs-3110-mini-project/test.csv'
def plt_distribution(dataset):
    data = dataset.copy()
    fig, axes = plt.subplots(5, 3)
    fig.set_figwidth(16)
    fig.set_figheight(20)
    sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0])
    sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1])
    sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2])
    sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0])
    sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1])
    sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2])
    sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0])
    sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1])
    sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2])
    sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0])
    sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1])
    sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2])
    sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0])
    sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1])
    sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2])
    plt.show()
def make_mi_scores(X, y):
    discrete_features = X.dtypes == int
    mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs)
    mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
    mi_scores = mi_scores.sort_values(ascending=False)
    return mi_scores
def plot_mi_scores(scores):
    plt.figure(dpi=100, figsize=(8, 10))
    scores = scores.sort_values(ascending=True)
    width = np.arange(len(scores))
    ticks = list(scores.index)
    plt.barh(width, scores)
    plt.yticks(width, ticks)
    plt.title("Mutual Information Scores")
    plt.show()
def evaluate_for_models(models, X, y):
    results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []})
    for model in models:
        score_f1 = cross_val_score(model, X, y, scoring='f1')
        score = cross_val_score(model, X, y)
        new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
        results = results.append(new_result, ignore_index=True)
    return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False)
def classification_report_with_accuracy_score(y_true, y_pred):
    print(classification_report(y_true, y_pred))
    return f1_score(y_true, y_pred)
def encode(dataframe, is_train=True):
    data = dataframe.copy()
    encoded_data = pd.get_dummies(data, columns=['location_code'])
    if is_train:
        encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0})
    for col in ['intertiol_plan', 'voice_mail_plan']:
        encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0})
    return encoded_data
def add_features(dataframe):
    global lr_day
    global lr_eve
    global lr_nyt
    data = dataframe.copy()
    data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes']
    try:
        data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']])
        data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']])
        data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']])
    except NameError:
        lr_day = LinearRegression()
        lr_eve = LinearRegression()
        lr_nyt = LinearRegression()
        lr_day.fit(data[['total_day_min']], data['total_day_charge'])
        lr_eve.fit(data[['total_eve_min']], data['total_eve_charge'])
        lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge'])
        data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']])
        data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']])
        data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']])
    data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge'])
    data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge'])
    data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge'])
    return data
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
plt_distribution(test_df)
code
105204136/cell_6
[ "text_html_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, f1_score, make_scorer from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns rs = 42 mi = 10000 sns.set_style('whitegrid') models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)] train_path = '../input/cs-3110-mini-project/train.csv' test_path = '../input/cs-3110-mini-project/test.csv' def plt_distribution(dataset): data = dataset.copy() fig, axes = plt.subplots(5, 3) fig.set_figwidth(16) fig.set_figheight(20) sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0]) sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1]) sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2]) sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0]) sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1]) sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2]) sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0]) sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1]) sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2]) sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0]) sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1]) sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2]) sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0]) sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1]) sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2]) plt.show() def make_mi_scores(X, y): discrete_features = X.dtypes == int mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): plt.figure(dpi=100, figsize=(8, 10)) scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") plt.show() def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score_f1 = cross_val_score(model, X, y, scoring='f1') score = cross_val_score(model, X, y) new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False) def classification_report_with_accuracy_score(y_true, y_pred): print(classification_report(y_true, y_pred)) return 
f1_score(y_true, y_pred) def encode(dataframe, is_train=True): data = dataframe.copy() encoded_data = pd.get_dummies(data, columns=['location_code']) if is_train: encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0}) return encoded_data def add_features(dataframe): global lr_day global lr_eve global lr_nyt data = dataframe.copy() data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes'] try: data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) except NameError: lr_day = LinearRegression() lr_eve = LinearRegression() lr_nyt = LinearRegression() lr_day.fit(data[['total_day_min']], data['total_day_charge']) lr_eve.fit(data[['total_eve_min']], data['total_eve_charge']) lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge']) data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge']) data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge']) data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge']) return data train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) train_df.info()
code
105204136/cell_7
[ "image_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, f1_score, make_scorer from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns rs = 42 mi = 10000 sns.set_style('whitegrid') models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)] train_path = '../input/cs-3110-mini-project/train.csv' test_path = '../input/cs-3110-mini-project/test.csv' def plt_distribution(dataset): data = dataset.copy() fig, axes = plt.subplots(5, 3) fig.set_figwidth(16) fig.set_figheight(20) sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0]) sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1]) sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2]) sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0]) sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1]) sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2]) sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0]) sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1]) sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2]) sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0]) sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1]) sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2]) sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0]) sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1]) sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2]) plt.show() def make_mi_scores(X, y): discrete_features = X.dtypes == int mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): plt.figure(dpi=100, figsize=(8, 10)) scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") plt.show() def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score_f1 = cross_val_score(model, X, y, scoring='f1') score = cross_val_score(model, X, y) new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False) def classification_report_with_accuracy_score(y_true, y_pred): print(classification_report(y_true, y_pred)) return 
f1_score(y_true, y_pred) def encode(dataframe, is_train=True): data = dataframe.copy() encoded_data = pd.get_dummies(data, columns=['location_code']) if is_train: encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0}) return encoded_data def add_features(dataframe): global lr_day global lr_eve global lr_nyt data = dataframe.copy() data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes'] try: data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) except NameError: lr_day = LinearRegression() lr_eve = LinearRegression() lr_nyt = LinearRegression() lr_day.fit(data[['total_day_min']], data['total_day_charge']) lr_eve.fit(data[['total_eve_min']], data['total_eve_charge']) lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge']) data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge']) data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge']) data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge']) return data train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) plt_distribution(train_df)
code
105204136/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, f1_score, make_scorer from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns rs = 42 mi = 10000 sns.set_style('whitegrid') models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)] train_path = '../input/cs-3110-mini-project/train.csv' test_path = '../input/cs-3110-mini-project/test.csv' def plt_distribution(dataset): data = dataset.copy() fig, axes = plt.subplots(5, 3) fig.set_figwidth(16) fig.set_figheight(20) sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0]) sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1]) sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2]) sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0]) sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1]) sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2]) sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0]) sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1]) sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2]) sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0]) sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1]) sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2]) sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0]) sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1]) sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2]) plt.show() def make_mi_scores(X, y): discrete_features = X.dtypes == int mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): plt.figure(dpi=100, figsize=(8, 10)) scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") plt.show() def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score_f1 = cross_val_score(model, X, y, scoring='f1') score = cross_val_score(model, X, y) new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False) def classification_report_with_accuracy_score(y_true, y_pred): print(classification_report(y_true, y_pred)) return 
f1_score(y_true, y_pred) def encode(dataframe, is_train=True): data = dataframe.copy() encoded_data = pd.get_dummies(data, columns=['location_code']) if is_train: encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0}) return encoded_data def add_features(dataframe): global lr_day global lr_eve global lr_nyt data = dataframe.copy() data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes'] try: data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) except NameError: lr_day = LinearRegression() lr_eve = LinearRegression() lr_nyt = LinearRegression() lr_day.fit(data[['total_day_min']], data['total_day_charge']) lr_eve.fit(data[['total_eve_min']], data['total_eve_charge']) lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge']) data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge']) data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge']) data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge']) return data train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) test_df.info()
code
105204136/cell_15
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, f1_score, make_scorer
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
rs = 42
mi = 10000
sns.set_style('whitegrid')
models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)]
train_path = '../input/cs-3110-mini-project/train.csv'
test_path = '../input/cs-3110-mini-project/test.csv'
def plt_distribution(dataset):
    data = dataset.copy()
    fig, axes = plt.subplots(5, 3)
    fig.set_figwidth(16)
    fig.set_figheight(20)
    sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0])
    sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1])
    sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2])
    sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0])
    sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1])
    sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2])
    sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0])
    sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1])
    sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2])
    sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0])
    sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1])
    sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2])
    sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0])
    sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1])
    sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2])
    plt.show()
def make_mi_scores(X, y):
    discrete_features = X.dtypes == int
    mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs)
    mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
    mi_scores = mi_scores.sort_values(ascending=False)
    return mi_scores
def plot_mi_scores(scores):
    plt.figure(dpi=100, figsize=(8, 10))
    scores = scores.sort_values(ascending=True)
    width = np.arange(len(scores))
    ticks = list(scores.index)
    plt.barh(width, scores)
    plt.yticks(width, ticks)
    plt.title("Mutual Information Scores")
    plt.show()
def evaluate_for_models(models, X, y):
    results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []})
    for model in models:
        score_f1 = cross_val_score(model, X, y, scoring='f1')
        score = cross_val_score(model, X, y)
        new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()}
        results = results.append(new_result, ignore_index=True)
    return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False)
def classification_report_with_accuracy_score(y_true, y_pred):
    print(classification_report(y_true, y_pred))
    return f1_score(y_true, y_pred)
def encode(dataframe, is_train=True):
    data = dataframe.copy()
    encoded_data = pd.get_dummies(data, columns=['location_code'])
    if is_train:
        encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0})
    for col in ['intertiol_plan', 'voice_mail_plan']:
        encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0})
    return encoded_data
def add_features(dataframe):
    global lr_day
    global lr_eve
    global lr_nyt
    data = dataframe.copy()
    data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes']
    try:
        data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']])
        data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']])
        data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']])
    except NameError:
        lr_day = LinearRegression()
        lr_eve = LinearRegression()
        lr_nyt = LinearRegression()
        lr_day.fit(data[['total_day_min']], data['total_day_charge'])
        lr_eve.fit(data[['total_eve_min']], data['total_eve_charge'])
        lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge'])
        data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']])
        data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']])
        data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']])
    data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge'])
    data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge'])
    data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge'])
    return data
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
d_droped_train = train_df.drop_duplicates(train_df.columns.drop(['customer_id']))
d_droped_train = d_droped_train.drop(columns=['Unnamed: 20'])
cols = d_droped_train.select_dtypes([np.number]).columns
d_droped_train[cols] = d_droped_train[cols].abs()
d_droped_train['account_length'].fillna(d_droped_train.account_length.median(), inplace=True)
d_droped_train['intertiol_plan'].fillna('no', inplace=True)
d_droped_train['voice_mail_plan'].fillna('no', inplace=True)
d_droped_train.loc[d_droped_train['voice_mail_plan'] == 'no', 'number_vm_messages'] = 0
d_droped_train.loc[(d_droped_train['voice_mail_plan'] == 'yes') & d_droped_train['number_vm_messages'].isnull(), 'number_vm_messages'] = d_droped_train[d_droped_train.voice_mail_plan == 'yes'].number_vm_messages.median()
d_droped_train.loc[d_droped_train['total_day_min'] > 500, 'total_day_min'] = np.nan
d_droped_train['total_day_min'] = d_droped_train.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train.loc[d_droped_train['total_day_calls'] > 350, 'total_day_calls'] = np.nan
d_droped_train['total_day_calls'] = d_droped_train.sort_values(['total_day_min']).total_day_calls.ffill().sort_index()
d_droped_train['total_day_charge'] = d_droped_train.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train.loc[d_droped_train['total_eve_min'] > 500, 'total_eve_min'] = np.nan
d_droped_train['total_eve_min'] = d_droped_train.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train['total_eve_calls'] = d_droped_train.sort_values(['total_eve_min']).total_eve_calls.ffill().sort_index()
d_droped_train['total_eve_charge'] = d_droped_train.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train.loc[d_droped_train['total_night_minutes'] > 500, 'total_night_minutes'] = np.nan
d_droped_train['total_night_minutes'] = d_droped_train.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train['total_night_calls'] = d_droped_train.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index()
d_droped_train.loc[d_droped_train['total_night_charge'] > 150, 'total_night_charge'] = np.nan
d_droped_train['total_night_charge'] = d_droped_train.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train['total_intl_minutes'] = d_droped_train.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train.loc[(d_droped_train['total_intl_minutes'] > 0) & (d_droped_train['total_intl_charge'] > 0) & (d_droped_train['total_intl_calls'] < 1), 'total_intl_calls'] = np.nan
d_droped_train['total_intl_calls'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index()
d_droped_train['total_intl_charge'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
d_droped_train['customer_service_calls'].fillna(1, inplace=True)
odm_handled_train = d_droped_train.dropna(subset=['Churn'])
test_df = test_df.drop(columns=['Unnamed: 19', 'Unnamed: 20'])
cols = test_df.select_dtypes([np.number]).columns
test_df[cols] = test_df[cols].abs()
test_df['location_code'] = test_df['location_code'].ffill()
test_df['intertiol_plan'].fillna('no', inplace=True)
test_df['voice_mail_plan'].fillna('no', inplace=True)
test_df['number_vm_messages'].fillna(0, inplace=True)
test_df['total_day_min'] = test_df.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df['total_day_calls'] = test_df.sort_values(['total_day_min']).total_day_calls.ffill().sort_index()
test_df['total_day_charge'] = test_df.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df['total_eve_min'] = test_df.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df['total_eve_charge'] = test_df.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df['total_night_minutes'] = test_df.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df['total_night_calls'] = test_df.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index()
test_df['total_night_charge'] = test_df.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df['total_intl_minutes'] = test_df.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index()
test_df.loc[(test_df['total_intl_minutes'] > 0) & (test_df['total_intl_charge'] > 0) & (test_df['total_intl_calls'] < 1), 'total_intl_calls'] = np.nan
test_df['total_intl_calls'] = test_df.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index()
test_df['customer_service_calls'].fillna(1, inplace=True)
train = odm_handled_train.copy()
test = test_df.copy()
plt_distribution(train)
code
105204136/cell_16
[ "image_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, f1_score, make_scorer from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns rs = 42 mi = 10000 sns.set_style('whitegrid') models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)] train_path = '../input/cs-3110-mini-project/train.csv' test_path = '../input/cs-3110-mini-project/test.csv' def plt_distribution(dataset): data = dataset.copy() fig, axes = plt.subplots(5, 3) fig.set_figwidth(16) fig.set_figheight(20) sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0]) sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1]) sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2]) sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0]) sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1]) sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2]) sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0]) sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1]) sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2]) sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0]) sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1]) sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2]) sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0]) sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1]) sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2]) plt.show() def make_mi_scores(X, y): discrete_features = X.dtypes == int mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): plt.figure(dpi=100, figsize=(8, 10)) scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") plt.show() def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score_f1 = cross_val_score(model, X, y, scoring='f1') score = cross_val_score(model, X, y) new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False) def classification_report_with_accuracy_score(y_true, y_pred): print(classification_report(y_true, y_pred)) return 
f1_score(y_true, y_pred) def encode(dataframe, is_train=True): data = dataframe.copy() encoded_data = pd.get_dummies(data, columns=['location_code']) if is_train: encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0}) return encoded_data def add_features(dataframe): global lr_day global lr_eve global lr_nyt data = dataframe.copy() data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes'] try: data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) except NameError: lr_day = LinearRegression() lr_eve = LinearRegression() lr_nyt = LinearRegression() lr_day.fit(data[['total_day_min']], data['total_day_charge']) lr_eve.fit(data[['total_eve_min']], data['total_eve_charge']) lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge']) data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge']) data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge']) data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge']) return data train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) d_droped_train = train_df.drop_duplicates(train_df.columns.drop(['customer_id'])) d_droped_train = d_droped_train.drop(columns=['Unnamed: 20']) cols = d_droped_train.select_dtypes([np.number]).columns d_droped_train[cols] = d_droped_train[cols].abs() d_droped_train['account_length'].fillna(d_droped_train.account_length.median(), inplace=True) d_droped_train['intertiol_plan'].fillna('no', inplace=True) d_droped_train['voice_mail_plan'].fillna('no', inplace=True) d_droped_train.loc[d_droped_train['voice_mail_plan'] == 'no', 'number_vm_messages'] = 0 d_droped_train.loc[(d_droped_train['voice_mail_plan'] == 'yes') & d_droped_train['number_vm_messages'].isnull(), 'number_vm_messages'] = d_droped_train[d_droped_train.voice_mail_plan == 'yes'].number_vm_messages.median() d_droped_train.loc[d_droped_train['total_day_min'] > 500, 'total_day_min'] = np.nan d_droped_train['total_day_min'] = d_droped_train.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_day_calls'] > 350, 'total_day_calls'] = np.nan d_droped_train['total_day_calls'] = d_droped_train.sort_values(['total_day_min']).total_day_calls.ffill().sort_index() d_droped_train['total_day_charge'] = d_droped_train.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_eve_min'] > 500, 'total_eve_min'] = np.nan d_droped_train['total_eve_min'] = d_droped_train.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_eve_calls'] = d_droped_train.sort_values(['total_eve_min']).total_eve_calls.ffill().sort_index() 
d_droped_train['total_eve_charge'] = d_droped_train.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_night_minutes'] > 500, 'total_night_minutes'] = np.nan d_droped_train['total_night_minutes'] = d_droped_train.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_night_calls'] = d_droped_train.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index() d_droped_train.loc[d_droped_train['total_night_charge'] > 150, 'total_night_charge'] = np.nan d_droped_train['total_night_charge'] = d_droped_train.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_intl_minutes'] = d_droped_train.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[(d_droped_train['total_intl_minutes'] > 0) & (d_droped_train['total_intl_charge'] > 0) & (d_droped_train['total_intl_calls'] < 1), 'total_intl_calls'] = np.nan d_droped_train['total_intl_calls'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index() d_droped_train['total_intl_charge'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['customer_service_calls'].fillna(1, inplace=True) odm_handled_train = d_droped_train.dropna(subset=['Churn']) test_df = test_df.drop(columns=['Unnamed: 19', 'Unnamed: 20']) cols = test_df.select_dtypes([np.number]).columns test_df[cols] = test_df[cols].abs() test_df['location_code'] = test_df['location_code'].ffill() test_df['intertiol_plan'].fillna('no', inplace=True) test_df['voice_mail_plan'].fillna('no', inplace=True) test_df['number_vm_messages'].fillna(0, inplace=True) test_df['total_day_min'] = test_df.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_day_calls'] = test_df.sort_values(['total_day_min']).total_day_calls.ffill().sort_index() test_df['total_day_charge'] = test_df.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_eve_min'] = test_df.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_eve_charge'] = test_df.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_night_minutes'] = test_df.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_night_calls'] = test_df.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index() test_df['total_night_charge'] = test_df.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_intl_minutes'] = test_df.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df.loc[(test_df['total_intl_minutes'] > 0) & (test_df['total_intl_charge'] > 0) & (test_df['total_intl_calls'] < 1), 
'total_intl_calls'] = np.nan test_df['total_intl_calls'] = test_df.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index() test_df['customer_service_calls'].fillna(1, inplace=True) train = odm_handled_train.copy() test = test_df.copy() plt.figure(figsize=(16, 16)) sns.pairplot(train, hue='Churn') plt.show()
code
105204136/cell_14
[ "image_output_1.png" ]
from sklearn.feature_selection import mutual_info_classif from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, f1_score, make_scorer from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns rs = 42 mi = 10000 sns.set_style('whitegrid') models = [DecisionTreeClassifier(random_state=rs), KNeighborsClassifier(), GaussianNB(), LinearSVC(random_state=rs, max_iter=mi), SVC(random_state=rs, max_iter=mi), LogisticRegression(random_state=rs, max_iter=mi)] train_path = '../input/cs-3110-mini-project/train.csv' test_path = '../input/cs-3110-mini-project/test.csv' def plt_distribution(dataset): data = dataset.copy() fig, axes = plt.subplots(5, 3) fig.set_figwidth(16) fig.set_figheight(20) sns.histplot(data=data, x='account_length', kde=True, ax=axes[0,0]) sns.histplot(data=data, x='number_vm_messages', kde=True, ax=axes[0,1]) sns.histplot(data=data, x='total_day_min', kde=True, ax=axes[0,2]) sns.histplot(data=data, x='total_day_calls', kde=True, ax=axes[1,0]) sns.histplot(data=data, x='total_day_charge', kde=True, ax=axes[1,1]) sns.histplot(data=data, x='total_eve_min', kde=True, ax=axes[1,2]) sns.histplot(data=data, x='total_eve_calls', kde=True, ax=axes[2,0]) sns.histplot(data=data, x='total_eve_charge', kde=True, ax=axes[2,1]) sns.histplot(data=data, x='total_night_minutes', kde=True, ax=axes[2,2]) sns.histplot(data=data, x='total_night_calls', kde=True, ax=axes[3,0]) sns.histplot(data=data, x='total_night_charge', kde=True, ax=axes[3,1]) sns.histplot(data=data, x='total_intl_minutes', kde=True, ax=axes[3,2]) sns.histplot(data=data, x='total_intl_calls', kde=True, ax=axes[4,0]) sns.histplot(data=data, x='total_intl_charge', kde=True, ax=axes[4,1]) sns.histplot(data=data, x='customer_service_calls', kde=True, ax=axes[4,2]) plt.show() def make_mi_scores(X, y): discrete_features = X.dtypes == int mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features, random_state=rs) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): plt.figure(dpi=100, figsize=(8, 10)) scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") plt.show() def evaluate_for_models(models, X, y): results = pd.DataFrame({'Model': [], 'ScoreMean(F1)': [], 'Score Standard Deviation(F1)': [], 'ScoreMean': [], 'Score Standard Deviation': []}) for model in models: score_f1 = cross_val_score(model, X, y, scoring='f1') score = cross_val_score(model, X, y) new_result = {'Model': model.__class__.__name__, 'ScoreMean(F1)': score_f1.mean(), 'Score Standard Deviation(F1)': score_f1.std(), 'ScoreMean': score.mean(), 'Score Standard Deviation': score.std()} results = results.append(new_result, ignore_index=True) return results.sort_values(by=['ScoreMean(F1)', 'Score Standard Deviation(F1)', 'ScoreMean', 'Score Standard Deviation'], ascending=False) def classification_report_with_accuracy_score(y_true, y_pred): print(classification_report(y_true, y_pred)) return 
f1_score(y_true, y_pred) def encode(dataframe, is_train=True): data = dataframe.copy() encoded_data = pd.get_dummies(data, columns=['location_code']) if is_train: encoded_data['Churn'] = encoded_data['Churn'].map({'Yes': 1, 'No': 0}) for col in ['intertiol_plan', 'voice_mail_plan']: encoded_data[col] = encoded_data[col].map({'yes': 1, 'no': 0}) return encoded_data def add_features(dataframe): global lr_day global lr_eve global lr_nyt data = dataframe.copy() data['total_min'] = data['total_day_min'] + data['total_eve_min'] + data['total_night_minutes'] + data['total_intl_minutes'] try: data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) except NameError: lr_day = LinearRegression() lr_eve = LinearRegression() lr_nyt = LinearRegression() lr_day.fit(data[['total_day_min']], data['total_day_charge']) lr_eve.fit(data[['total_eve_min']], data['total_eve_charge']) lr_nyt.fit(data[['total_night_minutes']], data['total_night_charge']) data['expected_total_day_charge'] = lr_day.predict(data[['total_day_min']]) data['expected_total_eve_charge'] = lr_eve.predict(data[['total_eve_min']]) data['expected_total_nyt_charge'] = lr_nyt.predict(data[['total_night_minutes']]) data['error_total_day_charge'] = abs(data['expected_total_day_charge'] - data['total_day_charge']) data['error_total_eve_charge'] = abs(data['expected_total_eve_charge'] - data['total_eve_charge']) data['error_total_nyt_charge'] = abs(data['expected_total_nyt_charge'] - data['total_night_charge']) return data train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) d_droped_train = train_df.drop_duplicates(train_df.columns.drop(['customer_id'])) d_droped_train = d_droped_train.drop(columns=['Unnamed: 20']) cols = d_droped_train.select_dtypes([np.number]).columns d_droped_train[cols] = d_droped_train[cols].abs() d_droped_train['account_length'].fillna(d_droped_train.account_length.median(), inplace=True) d_droped_train['intertiol_plan'].fillna('no', inplace=True) d_droped_train['voice_mail_plan'].fillna('no', inplace=True) d_droped_train.loc[d_droped_train['voice_mail_plan'] == 'no', 'number_vm_messages'] = 0 d_droped_train.loc[(d_droped_train['voice_mail_plan'] == 'yes') & d_droped_train['number_vm_messages'].isnull(), 'number_vm_messages'] = d_droped_train[d_droped_train.voice_mail_plan == 'yes'].number_vm_messages.median() d_droped_train.loc[d_droped_train['total_day_min'] > 500, 'total_day_min'] = np.nan d_droped_train['total_day_min'] = d_droped_train.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_day_calls'] > 350, 'total_day_calls'] = np.nan d_droped_train['total_day_calls'] = d_droped_train.sort_values(['total_day_min']).total_day_calls.ffill().sort_index() d_droped_train['total_day_charge'] = d_droped_train.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_eve_min'] > 500, 'total_eve_min'] = np.nan d_droped_train['total_eve_min'] = d_droped_train.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_eve_calls'] = d_droped_train.sort_values(['total_eve_min']).total_eve_calls.ffill().sort_index() 
d_droped_train['total_eve_charge'] = d_droped_train.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[d_droped_train['total_night_minutes'] > 500, 'total_night_minutes'] = np.nan d_droped_train['total_night_minutes'] = d_droped_train.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_night_calls'] = d_droped_train.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index() d_droped_train.loc[d_droped_train['total_night_charge'] > 150, 'total_night_charge'] = np.nan d_droped_train['total_night_charge'] = d_droped_train.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['total_intl_minutes'] = d_droped_train.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train.loc[(d_droped_train['total_intl_minutes'] > 0) & (d_droped_train['total_intl_charge'] > 0) & (d_droped_train['total_intl_calls'] < 1), 'total_intl_calls'] = np.nan d_droped_train['total_intl_calls'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index() d_droped_train['total_intl_charge'] = d_droped_train.sort_values(['total_intl_minutes']).total_intl_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() d_droped_train['customer_service_calls'].fillna(1, inplace=True) odm_handled_train = d_droped_train.dropna(subset=['Churn']) test_df = test_df.drop(columns=['Unnamed: 19', 'Unnamed: 20']) cols = test_df.select_dtypes([np.number]).columns test_df[cols] = test_df[cols].abs() test_df['location_code'] = test_df['location_code'].ffill() test_df['intertiol_plan'].fillna('no', inplace=True) test_df['voice_mail_plan'].fillna('no', inplace=True) test_df['number_vm_messages'].fillna(0, inplace=True) test_df['total_day_min'] = test_df.sort_values(['total_day_charge']).total_day_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_day_calls'] = test_df.sort_values(['total_day_min']).total_day_calls.ffill().sort_index() test_df['total_day_charge'] = test_df.sort_values(['total_day_min']).total_day_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_eve_min'] = test_df.sort_values(['total_eve_charge']).total_eve_min.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_eve_charge'] = test_df.sort_values(['total_eve_min']).total_eve_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_night_minutes'] = test_df.sort_values(['total_night_charge']).total_night_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_night_calls'] = test_df.sort_values(['total_night_minutes']).total_night_calls.ffill().sort_index() test_df['total_night_charge'] = test_df.sort_values(['total_night_minutes']).total_night_charge.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df['total_intl_minutes'] = test_df.sort_values(['total_intl_charge']).total_intl_minutes.interpolate(method='linear', limit_direction='forward', axis=0).sort_index() test_df.loc[(test_df['total_intl_minutes'] > 0) & (test_df['total_intl_charge'] > 0) & (test_df['total_intl_calls'] < 1), 
'total_intl_calls'] = np.nan test_df['total_intl_calls'] = test_df.sort_values(['total_intl_minutes']).total_intl_calls.ffill().sort_index() test_df['customer_service_calls'].fillna(1, inplace=True) train = odm_handled_train.copy() test = test_df.copy() train.head()
code
89136081/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv')
df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv')
df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv')
df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv')
s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx')
d_start = pd.to_datetime(df_events['time'][0])
d_end = pd.to_datetime(df_events['time'][len(df_events) - 1])
days = d_end - d_start
df_og = pd.DataFrame()
competition = ['Summer', 'Winter']
nb_discipline = []
nb_country = []
nb_athlete = []
nb_discipline.append(len(s_athletes['Discipline'].unique()))
nb_discipline.append(len(df_medal['discipline'].unique()))
nb_country.append(len(s_athletes['NOC'].unique()))
nb_country.append(len(df_athletes['country'].unique()))
nb_athlete.append(len(s_athletes['Name'].unique()))
nb_athlete.append(len(df_athletes['name'].unique()))
df_og['competition'] = competition
df_og['disciplines'] = nb_discipline
df_og['countries'] = nb_country
df_og['athletes'] = nb_athlete
df_og
df_og.style.set_caption('Summer vs Winter')
cols = ['medal_type', 'event', 'country', 'discipline']
df_medal = df_medal[cols]
countries = df_total_medals['Country']
nb_d = []
df_medal.drop_duplicates()
for country in countries:
    temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country]
    temp_2 = temp['discipline']
    temp_2 = temp_2.drop_duplicates()
    nb_d.append(len(temp_2))
df_total_medals['Discipline'] = nb_d
columns = ['Order', 'Country', 'Bronze', 'Silver', 'Gold', 'Total', 'Order by Total', 'Country Code', 'Discipline']
df_total_medals = df_total_medals.reindex(columns=columns)
# Medals barplot
df_total_medals = df_total_medals.sort_index(ascending=False)
# figure prep
plt.rcParams['figure.dpi'] = 200  # figure dots per inch
fig = plt.figure(figsize=(3, 30), facecolor='#f6f5f5')
gs = fig.add_gridspec(1, 1)
gs.update(wspace=1.5, hspace=0.05)
background_color = "#f6f5f5"
sns.set_palette(['#D8392B', '#CD7F32', '#C0C0C0', '#FFD700'])
ax0 = fig.add_subplot(gs[0, 0])
for s in ["right", "top"]:
    ax0.spines[s].set_visible(False)
ax0.set_facecolor(background_color)
# things to plot on the figure
ax0_sns = df_total_medals.plot(x='Country', y=['Discipline', 'Bronze', 'Silver', 'Gold'], kind='barh', ax=ax0, zorder=2, width=0.8)
ax0_sns.set_xlabel('Medals Count', fontsize=4, weight='bold')
ax0_sns.set_ylabel('Team Name', fontsize=4, weight='bold')
ax0_sns.grid(which='major', axis='x', zorder=0, color='#EEEEEE', linewidth=0.4)
ax0_sns.grid(which='major', axis='y', zorder=0, color='#EEEEEE', linewidth=0.4)
ax0_sns.tick_params(labelsize=3, width=0.5, length=1.5)
ax0_sns.legend(['Discipline', 'Bronze', 'Silver', 'Gold'], ncol=4, facecolor='#D8D8D8', edgecolor=background_color, fontsize=3, bbox_to_anchor=(1, 1.005), loc='upper right')
for p in ax0_sns.patches:
    value = f'{p.get_width():.0f}'
    if value == '0':
        pass
    else:
        x = p.get_x() + p.get_width() + 1
        y = p.get_y() + p.get_height() / 2
        ax0.text(x, y, value, ha='left', va='center', fontsize=3)
Xstart, Xend = ax0.get_xlim()
Ystart, Yend = ax0.get_ylim()
ax0_sns.text(Xend - 1, Yend + 0.3, f'Medals Table', fontsize=6, weight='bold', ha='right')
plt.show()
athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv')
athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'})
cols = ['gender', 'Discipline']
gender = athletes[cols]
gender['M'] = 0
gender['F'] = 0
i = 0
while i < len(gender['gender']):
    if gender['gender'][i] == 'Male':
        gender['M'][i] = 1
    else:
        gender['F'][i] = 1
    i += 1
gender['Male'] = gender.groupby('Discipline')['M'].transform('sum')
gender['Female'] = gender.groupby('Discipline')['F'].transform('sum')
del gender['gender']
del gender['M']
del gender['F']
gender = gender.drop_duplicates()
gender = gender.iloc[:-1, :]
gender['Total'] = gender['Male'] + gender['Female']
total_male = int(gender['Male'].sum())
total_female = int(gender['Female'].sum())
gender.sort_values(by='Total', inplace=True)
plt.rcParams['figure.dpi'] = 300
fig = plt.figure(figsize=(2, 5), facecolor='#f6f5f5')
gs = fig.add_gridspec(1, 1)
gs.update(wspace=1.5, hspace=0.05)
background_color = '#f6f5f5'
sns.set_palette(['#87ceeb', '#ff355d'])
ax0 = fig.add_subplot(gs[0, 0])
for s in ['right', 'top']:
    ax0.spines[s].set_visible(False)
ax0.set_facecolor(background_color)
ax0_sns = gender.plot(x='Discipline', y=['Male', 'Female'], kind='barh', ax=ax0, zorder=2, width=0.8)
ax0_sns.set_xlabel('Genders Count', fontsize=4, weight='bold')
ax0_sns.set_ylabel('Discipline', fontsize=4, weight='bold')
ax0_sns.grid(which='major', axis='x', zorder=0, color='#EEEEEE', linewidth=0.4)
ax0_sns.grid(which='major', axis='y', zorder=0, color='#EEEEEE', linewidth=0.4)
ax0_sns.tick_params(labelsize=3, width=0.5, length=1.5)
ax0_sns.legend(['Male', 'Female'], ncol=2, facecolor='#D8D8D8', edgecolor=background_color, fontsize=3, bbox_to_anchor=(1, 1.03), loc='upper right')
for p in ax0_sns.patches:
    value = f'{p.get_width():.0f}'
    x = p.get_x() + p.get_width() + 20
    y = p.get_y() + p.get_height() / 2
    ax0.text(x, y, value, ha='left', va='center', fontsize=3, bbox=dict(facecolor='none', edgecolor='black', boxstyle='round', linewidth=0.3))
ax0_sns.text(300, 16, f'Gender Plot', fontsize=6, weight='bold', ha='right')
ax0.text(480, 15.5, f'Entries by Discipline and number of females and males taking part in it', fontsize=3, ha='right')
plt.show()
code
89136081/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv')
df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv')
df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv')
df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv')
s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx')
columns = ['Order', 'Country', 'Bronze', 'Silver', 'Gold', 'Total', 'Order by Total', 'Country Code', 'Discipline']
df_total_medals = df_total_medals.reindex(columns=columns)
for i in range(len(df_total_medals)):
    df_total_medals['Country'][i] = '#' + str(df_total_medals['Order'][i]) + ' ' + df_total_medals['Country'][i]
code
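The loop above rewrites the Country column one row at a time through chained indexing. A vectorized sketch that produces the same '#<order> <country>' labels (an illustration, not taken from the notebook):

import pandas as pd

df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv')
df_total_medals['Country'] = '#' + df_total_medals['Order'].astype(str) + ' ' + df_total_medals['Country']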
89136081/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d columns = ['Order', 'Country', 'Bronze', 'Silver', 'Gold', 'Total', 'Order by Total', 'Country Code', 'Discipline'] df_total_medals = df_total_medals.reindex(columns=columns) # Medals barplot df_total_medals = df_total_medals.sort_index(ascending=False) # figure prep plt.rcParams['figure.dpi'] = 200 # figure dots per inch fig = plt.figure(figsize=(3,30), facecolor='#f6f5f5') gs = fig.add_gridspec(1, 1) gs.update(wspace=1.5, hspace=0.05) background_color = "#f6f5f5" sns.set_palette(['#D8392B','#CD7F32','#C0C0C0','#FFD700']) ax0 = fig.add_subplot(gs[0, 0]) for s in ["right", "top"]: ax0.spines[s].set_visible(False) ax0.set_facecolor(background_color) #things to plot on the figure ax0_sns = df_total_medals.plot(x='Country',y=['Discipline','Bronze','Silver','Gold'],kind='barh',ax=ax0,zorder=2,width=0.8) ax0_sns.set_xlabel('Medals Count',fontsize=4, weight='bold') ax0_sns.set_ylabel('Team Name',fontsize=4, weight='bold') ax0_sns.grid(which='major', axis='x', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.grid(which='major', axis='y', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.tick_params(labelsize=3, width=0.5, length=1.5) ax0_sns.legend(['Discipline','Bronze', 'Silver','Gold'], ncol=4, facecolor='#D8D8D8'\ ,edgecolor=background_color, fontsize=3, bbox_to_anchor=(1, 1.005), loc='upper right') for p in ax0_sns.patches: value = f'{p.get_width():.0f}' if value == '0': pass else: x = p.get_x() + p.get_width() + 1 y = p.get_y() + p.get_height() / 2 ax0.text(x, y, value, ha='left', va='center', fontsize=3) Xstart, Xend = ax0.get_xlim() Ystart, Yend = ax0.get_ylim() ax0_sns.text(Xend-1, Yend+0.3, f'Medals Table', fontsize=6, weight='bold',ha='right') plt.show() athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] gender['M'] = 0 gender['F'] = 0 i = 0 while i < 
len(gender['gender']): if gender['gender'][i] == 'Male': gender['M'][i] = 1 else: gender['F'][i] = 1 i += 1 gender['Male'] = gender.groupby('Discipline')['M'].transform('sum') gender['Female'] = gender.groupby('Discipline')['F'].transform('sum') del gender['gender'] del gender['M'] del gender['F'] gender = gender.drop_duplicates() gender = gender.iloc[:-1, :] gender['Total'] = gender['Male'] + gender['Female'] total_male = int(gender['Male'].sum()) total_female = int(gender['Female'].sum()) gender.sort_values(by='Total',inplace=True) # Gender barplot plt.rcParams['figure.dpi'] = 300 fig = plt.figure(figsize=(2,5), facecolor='#f6f5f5') gs = fig.add_gridspec(1, 1) gs.update(wspace=1.5, hspace=0.05) background_color = "#f6f5f5" sns.set_palette(['#87ceeb','#ff355d']) ax0 = fig.add_subplot(gs[0, 0]) for s in ["right", "top"]: ax0.spines[s].set_visible(False) ax0.set_facecolor(background_color) ax0_sns = gender.plot(x='Discipline',y=['Male','Female'],kind='barh',ax=ax0,zorder=2,width=0.8) ##plotttt of bars ax0_sns.set_xlabel('Genders Count',fontsize=4, weight='bold',) ax0_sns.set_ylabel('Discipline',fontsize=4, weight='bold') ax0_sns.grid(which='major', axis='x', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.grid(which='major', axis='y', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.tick_params(labelsize=3, width=0.5, length=1.5) # w and l of petit trait de mesure de l'axe x et y ax0_sns.legend(['Male', 'Female'], ncol=2, facecolor='#D8D8D8', edgecolor=background_color, fontsize=3, bbox_to_anchor=(1, 1.03), loc='upper right') for p in ax0_sns.patches: value = f'{p.get_width():.0f}' if value == '0': pass else: x = p.get_x() + p.get_width() + 20 y = p.get_y() + p.get_height() / 2 ax0.text(x, y, value, ha='left', va='center', fontsize=3, bbox=dict(facecolor='none', edgecolor='black', boxstyle='round', linewidth=0.3)) ax0_sns.text(300,16, f'Gender Plot', fontsize=6, weight='bold',ha='right') ax0.text(480, 15.5,f'Entries by Discipline and number of females and males taking part in it',fontsize=3,ha='right') plt.show() cols = ['Country', 'Discipline'] athletes = athletes[cols] y = athletes.Country.value_counts().index x = athletes.Country.value_counts().values plt.rcParams['figure.dpi'] = 300 fig = plt.figure(figsize=(2, 48), facecolor='#f6f5f5') gs = fig.add_gridspec(1, 1) gs.update(wspace=1.5, hspace=0.05) background_color = '#f6f5f5' sns.set_palette(['#bca6cf'] * 1200) ax0 = fig.add_subplot(gs[0, 0]) for s in ['right', 'top']: ax0.spines[s].set_visible(False) ax0.set_facecolor(background_color) ax0_sns = sns.barplot(data=athletes, y=y, x=x, zorder=2) ax0_sns.set_xlabel('No of Athletes', fontsize=4, weight='bold') ax0_sns.set_ylabel('Countries', fontsize=4, weight='bold') ax0_sns.grid(which='major', axis='x', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.grid(which='major', axis='y', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.tick_params(labelsize=3, width=0.5, length=1.5) for p in ax0_sns.patches: value = f'{p.get_width():.0f}' x = p.get_x() + p.get_width() + 20 y = p.get_y() + p.get_height() / 2 ax0.text(x, y, value, ha='left', va='center', fontsize=3, bbox=dict(facecolor='none', edgecolor='black', boxstyle='round', linewidth=0.3)) ax0_sns.text(150, -1.6, f'Athletes Plot', fontsize=6, weight='bold', ha='right') ax0.text(175, -1.3, f'Contains details about the participating Athletes', fontsize=3, ha='right') plt.show()
code
89136081/cell_4
[ "image_output_1.png" ]
pip install openpyxl
code
89136081/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] athletes.head()
code
89136081/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] gender['M'] = 0 gender['F'] = 0 i = 0 while i < len(gender['gender']): if gender['gender'][i] == 'Male': gender['M'][i] = 1 else: gender['F'][i] = 1 i += 1 gender['Male'] = gender.groupby('Discipline')['M'].transform('sum') gender['Female'] = gender.groupby('Discipline')['F'].transform('sum') del gender['gender'] del gender['M'] del gender['F'] gender = gender.drop_duplicates() gender = gender.iloc[:-1, :] gender['Total'] = gender['Male'] + gender['Female'] total_male = int(gender['Male'].sum()) total_female = int(gender['Female'].sum()) print('There is a total of', total_male, 'male and', total_female, 'female for the Beijing 2022 Olympic Games')
code
89136081/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] gender['M'] = 0 gender['F'] = 0 i = 0 while i < len(gender['gender']): if gender['gender'][i] == 'Male': gender['M'][i] = 1 else: gender['F'][i] = 1 i += 1 gender['Male'] = gender.groupby('Discipline')['M'].transform('sum') gender['Female'] = gender.groupby('Discipline')['F'].transform('sum') del gender['gender'] del gender['M'] del gender['F'] gender = gender.drop_duplicates() gender = gender.iloc[:-1, :] gender['Total'] = gender['Male'] + gender['Female'] total_male = int(gender['Male'].sum()) total_female = int(gender['Female'].sum())
code
89136081/cell_7
[ "image_output_1.png" ]
import pandas as pd
df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv')
df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv')
df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv')
df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv')
s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx')
d_start = pd.to_datetime(df_events['time'][0])
d_end = pd.to_datetime(df_events['time'][len(df_events) - 1])
days = d_end - d_start
df_og = pd.DataFrame()
competition = ['Summer', 'Winter']
nb_discipline = []
nb_country = []
nb_athlete = []
nb_discipline.append(len(s_athletes['Discipline'].unique()))
nb_discipline.append(len(df_medal['discipline'].unique()))
nb_country.append(len(s_athletes['NOC'].unique()))
nb_country.append(len(df_athletes['country'].unique()))
nb_athlete.append(len(s_athletes['Name'].unique()))
nb_athlete.append(len(df_athletes['name'].unique()))
df_og['competition'] = competition
df_og['disciplines'] = nb_discipline
df_og['countries'] = nb_country
df_og['athletes'] = nb_athlete
df_og
df_og.style.set_caption('Summer vs Winter')
code
89136081/cell_18
[ "image_output_1.png" ]
import pandas as pd df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] gender['Discipline'].value_counts()
code
89136081/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] gender.info()
code
89136081/cell_24
[ "image_output_1.png" ]
import pandas as pd df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') d_start = pd.to_datetime(df_events['time'][0]) d_end = pd.to_datetime(df_events['time'][len(df_events) - 1]) days = d_end - d_start df_og = pd.DataFrame() competition = ['Summer', 'Winter'] nb_discipline = [] nb_country = [] nb_athlete = [] nb_discipline.append(len(s_athletes['Discipline'].unique())) nb_discipline.append(len(df_medal['discipline'].unique())) nb_country.append(len(s_athletes['NOC'].unique())) nb_country.append(len(df_athletes['country'].unique())) nb_athlete.append(len(s_athletes['Name'].unique())) nb_athlete.append(len(df_athletes['name'].unique())) df_og['competition'] = competition df_og['disciplines'] = nb_discipline df_og['countries'] = nb_country df_og['athletes'] = nb_athlete df_og df_og.style.set_caption('Summer vs Winter') cols = ['medal_type', 'event', 'country', 'discipline'] df_medal = df_medal[cols] countries = df_total_medals['Country'] nb_d = [] df_medal.drop_duplicates() for country in countries: temp = df_medal.drop_duplicates()[df_medal.drop_duplicates()['country'] == country] temp_2 = temp['discipline'] temp_2 = temp_2.drop_duplicates() nb_d.append(len(temp_2)) df_total_medals['Discipline'] = nb_d athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') athletes = athletes.rename(columns={'discipline': 'Discipline', 'country': 'Country'}) cols = ['gender', 'Discipline'] gender = athletes[cols] cols = ['Country', 'Discipline'] athletes = athletes[cols] athletes.head()
code
89136081/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv') df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv') df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv') df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv') s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx') columns = ['Order', 'Country', 'Bronze', 'Silver', 'Gold', 'Total', 'Order by Total', 'Country Code', 'Discipline'] df_total_medals = df_total_medals.reindex(columns=columns) df_total_medals = df_total_medals.sort_index(ascending=False) plt.rcParams['figure.dpi'] = 200 fig = plt.figure(figsize=(3, 30), facecolor='#f6f5f5') gs = fig.add_gridspec(1, 1) gs.update(wspace=1.5, hspace=0.05) background_color = '#f6f5f5' sns.set_palette(['#D8392B', '#CD7F32', '#C0C0C0', '#FFD700']) ax0 = fig.add_subplot(gs[0, 0]) for s in ['right', 'top']: ax0.spines[s].set_visible(False) ax0.set_facecolor(background_color) ax0_sns = df_total_medals.plot(x='Country', y=['Discipline', 'Bronze', 'Silver', 'Gold'], kind='barh', ax=ax0, zorder=2, width=0.8) ax0_sns.set_xlabel('Medals Count', fontsize=4, weight='bold') ax0_sns.set_ylabel('Team Name', fontsize=4, weight='bold') ax0_sns.grid(which='major', axis='x', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.grid(which='major', axis='y', zorder=0, color='#EEEEEE', linewidth=0.4) ax0_sns.tick_params(labelsize=3, width=0.5, length=1.5) ax0_sns.legend(['Discipline', 'Bronze', 'Silver', 'Gold'], ncol=4, facecolor='#D8D8D8', edgecolor=background_color, fontsize=3, bbox_to_anchor=(1, 1.005), loc='upper right') for p in ax0_sns.patches: value = f'{p.get_width():.0f}' x = p.get_x() + p.get_width() + 1 y = p.get_y() + p.get_height() / 2 ax0.text(x, y, value, ha='left', va='center', fontsize=3) Xstart, Xend = ax0.get_xlim() Ystart, Yend = ax0.get_ylim() ax0_sns.text(Xend - 1, Yend + 0.3, f'Medals Table', fontsize=6, weight='bold', ha='right') plt.show()
code
89136081/cell_10
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
df_medal = pd.read_csv('../input/beijing-2022-olympics/medals.csv')
df_total_medals = pd.read_csv('../input/beijing-2022-olympics/medals_total.csv')
df_events = pd.read_csv('../input/beijing-2022-olympics/events.csv')
df_athletes = pd.read_csv('../input/beijing-2022-olympics/athletes.csv')
s_athletes = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx')
df_medal.head()
code
34143777/cell_13
[ "text_plain_output_1.png" ]
from keras import optimizers from keras.layers import Activation, Convolution2D, Dropout, Conv2D,MaxPool2D from keras.layers import Dense, Conv2D , MaxPooling2D , Flatten , Dropout from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.models import Sequential from keras.models import Sequential from sklearn.preprocessing import LabelBinarizer import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_train.csv') test = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_test.csv') X_train = train.iloc[:, 1:] Y_train = train.iloc[:, 0] X_test = test.iloc[:, 1:] Y_test = test.iloc[:, 0] (X_train.shape, Y_train.shape) X_train = np.array(X_train) X_test = np.array(X_test) X_train = X_train / 255 X_test = X_test / 255 X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1) X_train.shape from sklearn.preprocessing import LabelBinarizer encoder = LabelBinarizer() Y_train = encoder.fit_transform(Y_train) Y_test = encoder.fit_transform(Y_test) model = Sequential() model.add(Conv2D(input_shape=(28, 28, 1), filters=20, kernel_size=(3, 3), padding='same', activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(MaxPool2D(2, 2)) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPool2D(2, 2)) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(128, (1, 1), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Flatten()) model.add(Dense(units=64, activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(Dense(units=24, activation='softmax')) learning_rate = 0.001 lr_decay = 1e-06 sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True) adam = optimizers.Adam(lr=0.002) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) history = model.fit(X_train, Y_train, epochs=60, batch_size=524, verbose=1, validation_data=(X_valid, Y_valid)) testModel = model.evaluate(X_test, Y_test) print('Acuarcy = %.2f%%' % (testModel[1] * 100)) print('Loss = %.2f%%' % (testModel[0] * 100)) print(history.history.keys()) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show()
code
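The optimizers in the cell above are constructed with the legacy lr argument (optimizers.SGD(lr=...), optimizers.Adam(lr=...)), which recent Keras releases warn about or reject. A minimal sketch of the current spelling, assuming Keras 2.3+ (not the notebook's original code):

from keras import optimizers

adam = optimizers.Adam(learning_rate=0.002)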
34143777/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34143777/cell_7
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_train.csv')
test = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_test.csv')
X_train = train.iloc[:, 1:]
Y_train = train.iloc[:, 0]
X_test = test.iloc[:, 1:]
Y_test = test.iloc[:, 0]
(X_train.shape, Y_train.shape)
X_train = np.array(X_train)
X_test = np.array(X_test)
X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
X_train.shape
code
34143777/cell_15
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from keras import optimizers from keras.layers import Activation, Convolution2D, Dropout, Conv2D,MaxPool2D from keras.layers import Dense, Conv2D , MaxPooling2D , Flatten , Dropout from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.models import Sequential from keras.models import Sequential from sklearn.metrics import classification_report, confusion_matrix from sklearn.preprocessing import LabelBinarizer import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_train.csv') test = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_test.csv') X_train = train.iloc[:, 1:] Y_train = train.iloc[:, 0] X_test = test.iloc[:, 1:] Y_test = test.iloc[:, 0] (X_train.shape, Y_train.shape) X_train = np.array(X_train) X_test = np.array(X_test) X_train = X_train / 255 X_test = X_test / 255 X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1) X_train.shape from sklearn.preprocessing import LabelBinarizer encoder = LabelBinarizer() Y_train = encoder.fit_transform(Y_train) Y_test = encoder.fit_transform(Y_test) model = Sequential() model.add(Conv2D(input_shape=(28, 28, 1), filters=20, kernel_size=(3, 3), padding='same', activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(MaxPool2D(2, 2)) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPool2D(2, 2)) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(128, (1, 1), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Flatten()) model.add(Dense(units=64, activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(Dense(units=24, activation='softmax')) learning_rate = 0.001 lr_decay = 1e-06 sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True) adam = optimizers.Adam(lr=0.002) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) history = model.fit(X_train, Y_train, epochs=60, batch_size=524, verbose=1, validation_data=(X_valid, Y_valid)) testModel = model.evaluate(X_test, Y_test) predicted_classes = model.predict_classes(X_test) rounded_labels = np.argmax(Y_test, axis=1) confusionMatrix = confusion_matrix(rounded_labels, predicted_classes) confusionMatrix = pd.DataFrame(confusionMatrix, index=[i for i in range(25) if i != 9], columns=[i for i in range(25) if i != 9]) classes = ['Class ' + str(i) for i in range(25) if i != 9] print(classification_report(rounded_labels, predicted_classes, target_names=classes))
code
34143777/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_train.csv')
test = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_test.csv')
X_train = train.iloc[:, 1:]
Y_train = train.iloc[:, 0]
X_test = test.iloc[:, 1:]
Y_test = test.iloc[:, 0]
(X_train.shape, Y_train.shape)
code
34143777/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras import optimizers from keras.layers import Activation, Convolution2D, Dropout, Conv2D,MaxPool2D from keras.layers import Dense, Conv2D , MaxPooling2D , Flatten , Dropout from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.models import Sequential from keras.models import Sequential from sklearn.metrics import classification_report, confusion_matrix from sklearn.preprocessing import LabelBinarizer import keras import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_train.csv') test = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_test.csv') X_train = train.iloc[:, 1:] Y_train = train.iloc[:, 0] X_test = test.iloc[:, 1:] Y_test = test.iloc[:, 0] (X_train.shape, Y_train.shape) X_train = np.array(X_train) X_test = np.array(X_test) X_train = X_train / 255 X_test = X_test / 255 X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1) X_train.shape from sklearn.preprocessing import LabelBinarizer encoder = LabelBinarizer() Y_train = encoder.fit_transform(Y_train) Y_test = encoder.fit_transform(Y_test) model = Sequential() model.add(Conv2D(input_shape=(28, 28, 1), filters=20, kernel_size=(3, 3), padding='same', activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(MaxPool2D(2, 2)) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPool2D(2, 2)) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(128, (1, 1), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Flatten()) model.add(Dense(units=64, activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(Dense(units=24, activation='softmax')) learning_rate = 0.001 lr_decay = 1e-06 sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True) adam = optimizers.Adam(lr=0.002) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) history = model.fit(X_train, Y_train, epochs=60, batch_size=524, verbose=1, validation_data=(X_valid, Y_valid)) testModel = model.evaluate(X_test, Y_test) predicted_classes = model.predict_classes(X_test) rounded_labels = np.argmax(Y_test, axis=1) confusionMatrix = confusion_matrix(rounded_labels, predicted_classes) confusionMatrix = pd.DataFrame(confusionMatrix, index=[i for i in range(25) if i != 9], columns=[i for i in range(25) if i != 9]) plt.figure(figsize=(15, 15)) sns.heatmap(confusionMatrix, cmap='OrRd_r', linecolor='black', linewidth=1, annot=True, fmt='')
code
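Sequential.predict_classes, used in the cell above, has been removed from newer tf.keras releases. An equivalent for this softmax classifier, assuming model and X_test as defined above (a sketch, not the notebook's code):

import numpy as np

predicted_classes = np.argmax(model.predict(X_test), axis=1)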
34143777/cell_12
[ "text_plain_output_1.png" ]
from keras import optimizers from keras.layers import Activation, Convolution2D, Dropout, Conv2D,MaxPool2D from keras.layers import Dense, Conv2D , MaxPooling2D , Flatten , Dropout from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.models import Sequential from keras.models import Sequential from sklearn.preprocessing import LabelBinarizer import keras import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_train.csv') test = pd.read_csv('/kaggle/input/sign-language-mnist/sign_mnist_test.csv') X_train = train.iloc[:, 1:] Y_train = train.iloc[:, 0] X_test = test.iloc[:, 1:] Y_test = test.iloc[:, 0] (X_train.shape, Y_train.shape) X_train = np.array(X_train) X_test = np.array(X_test) X_train = X_train / 255 X_test = X_test / 255 X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1) X_train.shape from sklearn.preprocessing import LabelBinarizer encoder = LabelBinarizer() Y_train = encoder.fit_transform(Y_train) Y_test = encoder.fit_transform(Y_test) model = Sequential() model.add(Conv2D(input_shape=(28, 28, 1), filters=20, kernel_size=(3, 3), padding='same', activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(MaxPool2D(2, 2)) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPool2D(2, 2)) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Conv2D(128, (1, 1), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(keras.layers.Dropout(0.1)) model.add(Flatten()) model.add(Dense(units=64, activation='relu')) model.add(keras.layers.Dropout(0.1)) model.add(Dense(units=24, activation='softmax')) learning_rate = 0.001 lr_decay = 1e-06 sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True) adam = optimizers.Adam(lr=0.002) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) history = model.fit(X_train, Y_train, epochs=60, batch_size=524, verbose=1, validation_data=(X_valid, Y_valid))
code
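The fit call above validates on X_valid and Y_valid, which never appear in the listed code. One plausible way to create them, as a held-out split of the training data (an assumption, not taken from the notebook):

from sklearn.model_selection import train_test_split

X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train, test_size=0.2, random_state=42)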
105203731/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
df.corr()
sns.heatmap(df.corr(), annot=True)
code
105203731/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import r2_score
import statsmodels.api as sm
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
lr_model.summary()
y_train_pred = lr_model.predict(x_train_sm)
x_test_sm = sm.add_constant(x_test)
y_test_pred = lr_model.predict(x_test_sm)
r2 = r2_score(y_test, y_test_pred)
r2
code
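The statsmodels cells in this notebook use x_train, x_test, y_train and y_test without defining them in the code shown. A plausible reconstruction (an assumption, not the author's code) is a simple train/test split of the x and y columns:

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv('../input/random-linear-regression/train.csv').dropna()
x_train, x_test, y_train, y_test = train_test_split(df['x'], df['y'], train_size=0.7, random_state=100)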
105203731/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
code
105203731/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
lr_model.summary()
y_train_pred = lr_model.predict(x_train_sm)
res = y_train - y_train_pred
x_test_sm = sm.add_constant(x_test)
y_test_pred = lr_model.predict(x_test_sm)
plt.figure()
plt.scatter(x_test, y_test)
plt.plot(x_test, y_test_pred, 'y')
code
105203731/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
df.corr()
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
lr_model.summary()
y_train_pred = lr_model.predict(x_train_sm)
res = y_train - y_train_pred
sns.distplot(res)
code
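seaborn.distplot, called above on the residuals, is deprecated in current seaborn releases. A drop-in alternative for the same residual plot, assuming res from the cell above (a sketch):

import seaborn as sns

sns.histplot(res, kde=True)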
105203731/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
plt.scatter(df['x'], df['y'])
plt.show()
code
105203731/cell_11
[ "text_plain_output_1.png" ]
x_train.shape
code
105203731/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
lr_model.summary()
y_train_pred = lr_model.predict(x_train_sm)
res = y_train - y_train_pred
plt.scatter(x_train, res)
code
105203731/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
sns.regplot(x='x', y='y', data=df)
code
105203731/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
df.corr()
code
105203731/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import statsmodels.api as sm
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
lr_model.summary()
code
105203731/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
lr_model.summary()
y_train_pred = lr_model.predict(x_train_sm)
plt.scatter(x_train, y_train)
plt.plot(x_train, y_train_pred)
plt.show()
code
105203731/cell_14
[ "image_output_1.png" ]
import statsmodels.api as sm
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
model = sm.OLS(y_train, x_train_sm)
lr_model = model.fit()
lr_model.params
code
105203731/cell_12
[ "text_html_output_1.png" ]
import statsmodels.api as sm
x_train.shape
x_train_sm = sm.add_constant(x_train)
x_train_sm
code
105203731/cell_5
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/random-linear-regression/train.csv')
df.shape
df.head()
code
33098715/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() df = data.agg({'price': ['sum', 'min', 'max', 'median'], 'mileage': ['sum', 'min', 'max', 'median']}) print('Max Year: ', data['year'].max()) print('Min Year: ', data['year'].min())
code
33098715/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
plt.tight_layout()
plt.xticks(rotation=90)
plt.tight_layout()
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.tight_layout()
data['brand'].value_counts().head()
code
33098715/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
plt.figure(figsize=(18, 8))
sns.countplot(data['brand'])
plt.tight_layout()
plt.xticks(rotation=90)
plt.xlabel('Car Brands')
plt.show()
code
33098715/cell_4
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.describe().transpose()
code
33098715/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() df = data.agg({'price': ['sum', 'min', 'max', 'median'], 'mileage': ['sum', 'min', 'max', 'median']}) df
code
33098715/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
code
33098715/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
data['model'].value_counts().head()
code
33098715/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33098715/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
data.head()
code
33098715/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.figure(figsize=(15, 8)) sns.distplot(data['price']) plt.show()
code
33098715/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
print('Unique car brands: ', data['brand'].nunique())
print('Unique car models: ', data['model'].nunique())
code
33098715/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() data.head()
code
33098715/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.figure(figsize=(18, 7)) sns.countplot(data['color']) plt.xticks(rotation=90) plt.title('Most used colors on cars') plt.show()
code
33098715/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.info()
code
33098715/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() print('Mean price for a car: ', round(data['price'].mean(), 2))
code
33098715/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0) data.isnull().sum() plt.tight_layout() plt.xticks(rotation=90) plt.tight_layout() plt.xticks(rotation=90) plt.xticks(rotation=90) plt.tight_layout() plt.figure(figsize=(20, 8)) data.groupby('model')['price'].mean().sort_values(ascending=False).plot.bar() plt.xticks(rotation=90) plt.ylabel('Mean Price') plt.xlabel('Car Models') plt.tight_layout() plt.show()
code
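The bar chart in the cell above is driven by a groupby mean over car models. The same ranking can be inspected directly without plotting (a sketch, not part of the notebook):

import pandas as pd

data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
mean_price_by_model = data.groupby('model')['price'].mean().sort_values(ascending=False)
print(mean_price_by_model.head(10))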