Dataset schema (one record per notebook code cell; fields repeat in this order below):
path: string, 13 to 17 characters (notebook id / cell id, e.g. 122262215/cell_41)
screenshot_names: sequence of strings, 1 to 873 entries (rendered cell outputs)
code: string, 0 to 40.4k characters (the cell's source)
cell_type: string, 1 distinct value ("code")
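As a minimal sketch of how a dump with this schema might be consumed (assuming it is published as a Hugging Face dataset; the dataset name below is a placeholder, not the real one):

    # Hypothetical loading sketch: 'user/notebook-cells' is a placeholder name.
    from datasets import load_dataset

    ds = load_dataset('user/notebook-cells', split='train')
    for row in ds:
        # Each row mirrors the schema above.
        print(row['path'], row['cell_type'], len(row['code']), len(row['screenshot_names']))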
122262215/cell_41
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import MultinomialNB

model = MultinomialNB()
model.fit(X_train, y_train)
model.score(X_test, y_test)
code
122262215/cell_7
[ "text_plain_output_1.png" ]
import re

email = 'We are goind USA to meet on saturday or sunday on 09:30 PM or 10:00 am ok on january good ? in Cairo or Giza ?'
email = email.lower()
re.findall('saturday|sunday|monday|wednesday', email)
re.findall('january|february', email)
code
122262215/cell_28
[ "text_plain_output_1.png" ]
import numpy as np

test = 'The standard way to access entity annotations is the doc.ents property, which produces a sequence of Span objects. The entity type is accessible either as a hash value or as a string using the attributes ent.label and The Span object acts as a sequence of tokens so you can iterate over the entity or index into it. You can also get the text form of the whole entity, as though it were a single token.'
clean_text = clean(test)
clean_text_arr = np.array(clean_text.split())
clean_text_arr.shape
code
122262215/cell_8
[ "text_plain_output_1.png" ]
import re

email = 'We are goind USA to meet on saturday or sunday on 09:30 PM or 10:00 am ok on january good ? in Cairo or Giza ?'
email = email.lower()
re.findall('saturday|sunday|monday|wednesday', email)
re.findall('january|february', email)
re.findall('\\d{1,2}:\\d{1,2} a?p?m', email)
code
122262215/cell_43
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import pandas as pd
import re
import re

email = 'We are goind USA to meet on saturday or sunday on 09:30 PM or 10:00 am ok on january good ? in Cairo or Giza ?'
email = email.lower()
re.findall('saturday|sunday|monday|wednesday', email)
re.findall('january|february', email)
re.findall('\\d{1,2}:\\d{1,2} a?p?m', email)
df = pd.DataFrame(columns=['text', 'label'])
old_dataset = pd.read_csv('./events.csv')
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer

wnl = WordNetLemmatizer()
engstopwords = stopwords.words('english')

def lemmatize_all_types(word):
    word = wnl.lemmatize(word, 'a')
    word = wnl.lemmatize(word, 'v')
    word = wnl.lemmatize(word, 'n')
    return word

def clean(text):
    text = re.sub('https?://\\w+\\.\\w+\\.\\w+', '', text).lower()
    text = re.sub('[^a-zA-Z ]', '', text)
    text = list(map(lemmatize_all_types, text.split()))
    text = [word for word in text if word not in engstopwords]
    text = ' '.join(text)
    return text

df = pd.read_csv('../input/emails-events/emails_events.csv')
tfidf = TfidfVectorizer(max_features=10000)
dtm = tfidf.fit_transform(X).toarray()
words = tfidf.get_feature_names()
X_dtm = pd.DataFrame(columns=words, data=dtm)
model = MultinomialNB()
model.fit(X_train, y_train)
model.score(X_test, y_test)
text = 'can we have meeting on the next week please on morning'
text = clean(text)
enc = tfidf.transform([text])
model.predict(enc)
text = 'what a beautiful garden that we saw in the cinema'
text = clean(text)
enc = tfidf.transform([text])
model.predict(enc)
code
122262215/cell_24
[ "text_plain_output_1.png" ]
from bs4 import BeautifulSoup
import pandas as pd
import requests

ps = soup.find_all('p', {'class': 'sentence-item__text'})
df = pd.DataFrame(columns=['text', 'label'])
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
days
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
for day in days:
    page = requests.get('https://sentence.yourdictionary.com/saturday')
    soup = BeautifulSoup(page.content, 'html.parser')
    ps = soup.find_all('p', {'class': 'sentence-item__text'})
    for p in ps:
        df = df.append({'text': p.text, 'label': 1}, ignore_index=True)
days = 'Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.lower().split()
for day in days:
    page = requests.get('https://sentence.yourdictionary.com/' + day)
    soup = BeautifulSoup(page.content, 'html.parser')
    ps = soup.find_all('p', {'class': 'sentence-item__text'})
    for p in ps:
        df = df.append({'text': p.text, 'label': 1}, ignore_index=True)
old_dataset.columns = ['text', 'label']
old_dataset.to_csv('good_dataset.csv', index=False)
months = 'January February March April May June July August September October November December'.lower().split()
for month in months:
    page = requests.get('https://sentence.yourdictionary.com/' + month)
    soup = BeautifulSoup(page.content, 'html.parser')
    ps = soup.find_all('p', {'class': 'sentence-item__text'})
    for p in ps:
        df = df.append({'text': p.text, 'label': 1}, ignore_index=True)
for item in ['again']:
    page = requests.get('https://sentence.yourdictionary.com/' + item)
    soup = BeautifulSoup(page.content, 'html.parser')
    ps = soup.find_all('p', {'class': 'sentence-item__text'})
    for p in ps:
        old_dataset = old_dataset.append({'text': p.text, 'label': 0}, ignore_index=True)
old_dataset.shape
old_dataset = pd.read_csv('./events.csv')
old_dataset.shape
code
122262215/cell_10
[ "text_plain_output_1.png" ]
import spacy

email = 'We are goind USA to meet on saturday or sunday on 09:30 PM or 10:00 am ok on january good ? in Cairo or Giza ?'
email = email.lower()
import spacy
nlp = spacy.load('en_core_web_sm')
email = 'We are goind to meet on 2025 1919 saturday or sunday on 09:30 PM or 10:00 in New York or Florida am ok on january good ? in Cairo or Giza ?'
doc = nlp(email)
for ent in doc.ents:
    print(ent.text, ent.label_)
code
122262215/cell_37
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import pandas as pd

df = pd.DataFrame(columns=['text', 'label'])
old_dataset = pd.read_csv('./events.csv')
df = pd.read_csv('../input/emails-events/emails_events.csv')
tfidf = TfidfVectorizer(max_features=10000)
dtm = tfidf.fit_transform(X).toarray()
words = tfidf.get_feature_names()
X_dtm = pd.DataFrame(columns=words, data=dtm)
code
34134627/cell_28
[ "text_plain_output_1.png" ]
import csv
import gensim
import matplotlib.pyplot as plt
import numpy as np

with open('../input/tokenized-words-cord19-challenge/data.csv', newline='') as f:
    reader = csv.reader(f)
    data = list(reader)
model2 = gensim.models.Word2Vec(data, min_count=1, size=100, window=5, sg=1)

def truncate(n, decimals=0):
    multiplier = 10 ** decimals
    return int(n * multiplier) / multiplier

a = truncate(model2.similarity('risk', 'smoking'), 2)
b = truncate(model2.similarity('risk', 'heart'), 2)
c = truncate(model2.similarity('risk', 'pregnant'), 2)
d = truncate(model2.similarity('risk', 'cancer'), 2)
e = truncate(model2.similarity('risk', 'diabetes'), 2)
f = truncate(model2.similarity('risk', 'age'), 2)
g = truncate(model2.similarity('risk', 'asthma'), 2)
h = truncate(model2.similarity('risk', 'HIV'), 2)
i = truncate(model2.similarity('risk', 'transplant'), 2)
j = truncate(model2.similarity('risk', 'obesity'), 2)
k = truncate(model2.similarity('risk', 'immunocompromised'), 2)
l = truncate(model2.similarity('risk', 'underweight'), 2)
m = truncate(model2.similarity('risk', 'liver'), 2)
n = truncate(model2.similarity('risk', 'bronchitis'), 2)
o = truncate(model2.similarity('risk', 'COPD'), 2)
objects = ('Smoking', 'Heart Disease', 'Pregnancy', 'Cancer', 'Diabetes', 'Age', 'Asthma', 'HIV', 'Transplant', 'Obesity', 'Immunocompromised', 'Underweight', 'Liver Disease', 'Chronic Bronchitis', 'COPD')
y_pos = np.arange(len(objects))
similarity = [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o]
fig = plt.figure(1, [15, 10])
axes = plt.gca()
axes.set_ylim([0, 1])
plt.bar(y_pos, similarity, align='center', alpha=0.5)
plt.xticks(y_pos, objects, rotation=90)
plt.ylabel('Cosine similarity with the word "risk" using Skip Gram model')
plt.title('Risk Factors')
count = -0.27
for i in similarity:
    plt.text(count, i - 0.05, str(i))
    count += 1
plt.show()
code
34134627/cell_3
[ "image_output_1.png" ]
import nltk
import warnings
from tqdm.notebook import tqdm
import csv
import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize, word_tokenize
from gensim.models import Word2Vec
import gensim
import os
import json
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings(action='ignore')
code
34134627/cell_27
[ "text_plain_output_1.png" ]
import csv
import gensim

with open('../input/tokenized-words-cord19-challenge/data.csv', newline='') as f:
    reader = csv.reader(f)
    data = list(reader)
model2 = gensim.models.Word2Vec(data, min_count=1, size=100, window=5, sg=1)

def truncate(n, decimals=0):
    multiplier = 10 ** decimals
    return int(n * multiplier) / multiplier

print('Cosine similarity using Skip Gram model between:')
a = truncate(model2.similarity('risk', 'smoking'), 2)
b = truncate(model2.similarity('risk', 'heart'), 2)
c = truncate(model2.similarity('risk', 'pregnant'), 2)
d = truncate(model2.similarity('risk', 'cancer'), 2)
e = truncate(model2.similarity('risk', 'diabetes'), 2)
f = truncate(model2.similarity('risk', 'age'), 2)
g = truncate(model2.similarity('risk', 'asthma'), 2)
h = truncate(model2.similarity('risk', 'HIV'), 2)
i = truncate(model2.similarity('risk', 'transplant'), 2)
j = truncate(model2.similarity('risk', 'obesity'), 2)
k = truncate(model2.similarity('risk', 'immunocompromised'), 2)
l = truncate(model2.similarity('risk', 'underweight'), 2)
m = truncate(model2.similarity('risk', 'liver'), 2)
n = truncate(model2.similarity('risk', 'bronchitis'), 2)
o = truncate(model2.similarity('risk', 'COPD'), 2)
print('risk and smoking : ', a)
print('risk and heart : ', b)
print('risk and pregnant : ', c)
print('risk and cancer : ', d)
print('risk and diabetes : ', e)
print('risk and age : ', f)
print('risk and asthma : ', g)
print('risk and HIV : ', h)
print('risk and transplant : ', i)
print('risk and obesity : ', j)
print('risk and immunocompromised : ', k)
print('risk and underweight : ', l)
print('risk and liver : ', m)
print('risk and bronchitis : ', n)
print('risk and COPD : ', o)
code
50224594/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
df.columns
code
50224594/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
df['status'].value_counts()
code
50224594/cell_4
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
code
50224594/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
from sklearn.metrics import classification_report, confusion_matrix
xgb = XGBClassifier()
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)
accuracy1 = xgb.score(x_test, y_test)
cm = confusion_matrix(y_test, y_pred)
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
des_class = DecisionTreeClassifier()
des_class.fit(x_train, y_train)
des_predict = des_class.predict(x_test)
print(classification_report(y_test, des_predict))
accuracy3 = des_class.score(x_test, y_test)
print(accuracy3 * 100, '%')
cm = confusion_matrix(y_test, des_predict)
sns.heatmap(cm, annot=True)
code
50224594/cell_6
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
code
50224594/cell_2
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50224594/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
sns.catplot(x='status', kind='count', data=df)
code
50224594/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
code
50224594/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
df.columns
col = {'MDVP:Fo(Hz)': 1, 'MDVP:Fhi(Hz)': 2, 'MDVP:Flo(Hz)': 3, 'MDVP:Jitter(%)': 4, 'MDVP:Jitter(Abs)': 5, 'MDVP:RAP': 6, 'MDVP:PPQ': 7, 'Jitter:DDP': 8, 'MDVP:Shimmer': 9, 'MDVP:Shimmer(dB)': 10, 'Shimmer:APQ3': 11, 'Shimmer:APQ5': 12, 'MDVP:APQ': 13, 'Shimmer:DDA': 14, 'NHR': 15, 'HNR': 16, 'RPDE': 17, 'DFA': 18, 'spread1': 19, 'spread2': 20, 'D2': 21, 'PPE': 22}
q1 = df.quantile(0.25)
q2 = df.quantile(0.5)
q3 = df.quantile(0.75)
IQR = q3 - q1
df_out = df[~((df < q1 - 1.5 * IQR) | (df > q3 + 1.5 * IQR)).any(axis=1)]
plt.figure(figsize=(20, 30))
for variable, i in col.items():
    plt.subplot(5, 5, i)
    plt.boxplot(df_out[variable])
    plt.title(variable)
plt.show()
code
50224594/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
from sklearn.metrics import classification_report, confusion_matrix
xgb = XGBClassifier()
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)
accuracy1 = xgb.score(x_test, y_test)
cm = confusion_matrix(y_test, y_pred)
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
des_class = DecisionTreeClassifier()
des_class.fit(x_train, y_train)
des_predict = des_class.predict(x_test)
accuracy3 = des_class.score(x_test, y_test)
cm = confusion_matrix(y_test, des_predict)
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import classification_report, confusion_matrix
nvclassifier = GaussianNB()
nvclassifier.fit(x_train, y_train)
y_pred = nvclassifier.predict(x_test)
print(classification_report(y_test, y_pred))
print(accuracy_score(y_pred, y_test) * 100, '%')
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
code
50224594/cell_28
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, confusion_matrix
from xgboost import XGBClassifier
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
from sklearn.metrics import classification_report, confusion_matrix
xgb = XGBClassifier()
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)
print(classification_report(y_test, y_pred))
accuracy1 = xgb.score(x_test, y_test)
print(accuracy1 * 100, '%')
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
code
50224594/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
df.hist(figsize=(20, 12))
plt.show()
code
50224594/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
df.columns
col = {'MDVP:Fo(Hz)': 1, 'MDVP:Fhi(Hz)': 2, 'MDVP:Flo(Hz)': 3, 'MDVP:Jitter(%)': 4, 'MDVP:Jitter(Abs)': 5, 'MDVP:RAP': 6, 'MDVP:PPQ': 7, 'Jitter:DDP': 8, 'MDVP:Shimmer': 9, 'MDVP:Shimmer(dB)': 10, 'Shimmer:APQ3': 11, 'Shimmer:APQ5': 12, 'MDVP:APQ': 13, 'Shimmer:DDA': 14, 'NHR': 15, 'HNR': 16, 'RPDE': 17, 'DFA': 18, 'spread1': 19, 'spread2': 20, 'D2': 21, 'PPE': 22}
plt.figure(figsize=(20, 30))
for variable, i in col.items():
    plt.subplot(5, 5, i)
    plt.boxplot(df[variable])
    plt.title(variable)
plt.show()
code
50224594/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.describe().T
df.isnull().sum()
df.columns
q1 = df.quantile(0.25)
q2 = df.quantile(0.5)
q3 = df.quantile(0.75)
IQR = q3 - q1
print(IQR)
code
50224594/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.head()
code
50224594/cell_10
[ "text_plain_output_1.png" ]
percentage_of_disease = 147 / (147 + 48) * 100
percentage_of_not_having_disease = 48 / (147 + 48) * 100
print('percentage of having disease', percentage_of_disease)
print('percentage of not having disease', percentage_of_not_having_disease)
code
50224594/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/parkinsonsxyz/parkinsons2.csv')
df.shape
df.info()
code
105180183/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/gdp-growth-of-pakistan/GDP Growth of Pakistan - Sheet1 (1).csv')
columns = ['Year', 'GDP growth(annual %)']
df.columns = columns
df = df[3:]
sns.regplot(x='Year', y='GDP growth(annual %)', data=df)
code
105180183/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/gdp-growth-of-pakistan/GDP Growth of Pakistan - Sheet1 (1).csv')
columns = ['Year', 'GDP growth(annual %)']
df.columns = columns
df = df[3:]
df.head()
code
105180183/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/gdp-growth-of-pakistan/GDP Growth of Pakistan - Sheet1 (1).csv')
df.head()
code
105180183/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/gdp-growth-of-pakistan/GDP Growth of Pakistan - Sheet1 (1).csv')
columns = ['Year', 'GDP growth(annual %)']
df.columns = columns
df = df[3:]
df.info()
code
105180183/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122249667/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
sns.boxplot(data=df, x='Oldpeak')
code
122249667/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use('seaborn')
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ['HeartDisease'] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
code
122249667/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ['HeartDisease'] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
code
122249667/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.head()
code
122249667/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
sns.boxplot(data=df, x='RestingBP')
code
122249667/cell_30
[ "text_html_output_2.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
labels = ['Less chance of heart attack', 'More chance of heart attack']
values = [df[df['HeartDisease'] == 1].count().to_numpy()[0], df[df['HeartDisease'] == 0].count().to_numpy()[0]]
fig = go.Figure(data=[go.Pie(labels=labels, values=values, marker_colors=['cyan', 'darkblue'], textinfo='label+percent')])
fig.update(layout_title_text='Chance of heart attack', layout_showlegend=False)
fig.show()
code
122249667/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
code
122249667/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
px.scatter(data_frame=df, x='Cholesterol', y='MaxHR', color='HeartDisease')
code
122249667/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
px.scatter(data_frame=df, x='Age', y='MaxHR', color='HeartDisease')
code
122249667/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
continuos_f = ['Age', 'RestingBP', 'Cholesterol', 'MaxHR', 'Oldpeak']
categorical_f = ['ChestPainType', 'RestingECG', 'ST_Slope']
binaries_f = ['Sex', 'FastingBS', 'ExerciseAngina']
df.isna().all()
df[continuos_f].describe()
code
122249667/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122249667/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
code
122249667/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
px.scatter(data_frame=df, x='RestingBP', y='MaxHR', color='HeartDisease')
code
122249667/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.info()
code
122249667/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)
code
122249667/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)
sns.boxplot(data=df, x='Cholesterol')
code
122249667/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
code
122249667/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
df.drop_duplicates()
df.isna().all()
plt.style.use("seaborn")
plt.subplots_adjust(hspace=0.2)
color = 'winter'
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
df_corr = df.corr()
df_corr['HeartDisease'].sort_values(ascending=False)

def delete_outliers(label=None):
    Q1 = df[label].quantile(0.25)
    Q3 = df[label].quantile(0.75)
    IQR = Q3 - Q1
    df_ch_outliers = df[~((df[label] > Q1 - 1.5 * IQR) & (df[label] < Q3 + 1.5 * IQR))]
    return df.drop(df_ch_outliers.index)

df = delete_outliers('Cholesterol')
df
fig, axs = plt.subplots(6, 2, figsize=(15, 28))
i = 1
for feature in df.columns:
    if feature not in ["HeartDisease"] and i < 14:
        plt.subplot(6, 2, i)
        sns.histplot(data=df, x=feature, kde=True, palette=color, hue='HeartDisease')
        i += 1
px.scatter(data_frame=df, x='Oldpeak', y='MaxHR', color='HeartDisease')
code
122249667/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/heart-failure-prediction/heart.csv')
df.shape
code
2011514/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
code
2011514/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
code
2011514/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
transactions.iloc[[0, 2, 5]]
code
2011514/cell_23
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
code
2011514/cell_30
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
transactions.iloc[[0, 2, 5]]
transactions.drop([0, 2, 5], axis=0)
transactions[:3]
transactions[3:]
transactions.tail(-2)
code
2011514/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[['ProductID', 'Quantity', 'TransactionDate', 'TransactionID', 'UserID']]
code
2011514/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
transactions.iloc[[0, 2, 5]]
transactions.drop([0, 2, 5], axis=0)
transactions[:3]
transactions[3:]
transactions.tail(2)
code
2011514/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
transactions.iloc[[0, 2, 5]]
transactions.drop([0, 2, 5], axis=0)
code
2011514/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
code
2011514/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
code
2011514/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
code
2011514/cell_28
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
transactions.iloc[[0, 2, 5]]
transactions.drop([0, 2, 5], axis=0)
transactions[:3]
transactions[3:]
transactions.tail(-3)
code
2011514/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
code
2011514/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
code
2011514/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
code
2011514/cell_24
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
col = 'ProductID'
transactions[[col]].values[:, 0]
code
2011514/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
code
2011514/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
code
2011514/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.shape[0]
transactions.shape[1]
transactions.index
transactions.index.values
transactions.columns.values
transactions.count()
transactions.sum(skipna=True, numeric_only=True)
transactions.rename(columns={'ProductID': 'PID', 'UserID': 'UID'})
transactions[pd.unique(['UserID'] + transactions.columns.values.tolist()).tolist()]
transactions.iloc[:, 2]
transactions.ProductID.values
transactions.iloc[[0, 2, 5]]
transactions.drop([0, 2, 5], axis=0)
transactions[:3]
transactions.head(3)
code
2011514/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

transactions = pd.read_csv('../input/transactions.csv')
transactions.info()
code
129032387/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
df.info()
code
129032387/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.info()
code
129032387/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
code
129032387/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129032387/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.dropna(how='all')
df.info()
code
129032387/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.dropna(how='all')
df['status'].value_counts()
code
129032387/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
print(df.head(2))
code
129032387/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/pakistans-largest-ecommerce-dataset/Pakistan Largest Ecommerce Dataset.csv', low_memory=False)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.dropna(how='all')
df['status'].value_counts()
code
32068380/cell_20
[ "image_output_1.png" ]
from IPython.display import Image

Image('../input/images-search-engine-cord19/abstract.PNG')
code
32068380/cell_18
[ "image_output_1.png" ]
from IPython.display import Image
from IPython.display import Image

Image('../input/images-search-engine-cord19/results.PNG')
code
32068380/cell_38
[ "text_plain_output_1.png" ]
import pandas as pd

def get_data(dir_path):
    """
    Take as input a directory path containing json files from biorxiv_medrxiv,
    comm_use_subset, noncomm_use_subset or custom_license.
    Four dataframes are returned: papers_df, authors_df, affiliations_df, bib_entries_df
    """
    files = os.listdir(dir_path)
    papers_df = pd.DataFrame(columns=['paper_id', 'title', 'authors', 'abstract', 'body_text', 'bib_titles', 'dataset'])
    authors_df = pd.DataFrame(columns=['author', 'affiliation'])
    affiliations_df = pd.DataFrame(columns=['affiliation', 'country'])
    bib_entries_df = pd.DataFrame(columns=['title', 'authors', 'year', 'venue'])
    line_author_df = 0
    line_affiliations_df = 0
    line_bib_entries_df = 0
    for line, file in enumerate(files):
        n_files = len(files)
        file_path = os.path.join('../data/{}'.format(dir), file)
        with open(file_path) as f:
            data = json.load(f)
        paper_id = data['paper_id']
        title = data['metadata']['title']
        authors, affiliations, countries = ('', '', '')
        for author in data['metadata']['authors']:
            first_last_name = author['first'] + ' ' + author['last']
            authors = authors + ' || ' + first_last_name
            if author['affiliation'] == {}:
                affiliation = 'NA'
                affiliations = affiliations + ' || ' + affiliation.strip()
                country = 'NA'
                countries = countries + ' || ' + country.strip()
                continue
            affiliation = author['affiliation']['laboratory'] + ' ' + author['affiliation']['institution']
            affiliations = affiliations + ' || ' + affiliation.strip()
            if 'country' not in author['affiliation']['location'].keys():
                country = 'NA'
                countries = countries + ' || ' + country
                continue
            country = author['affiliation']['location']['country']
            countries = countries + ' || ' + country
        authors = authors[4:]
        affiliations = affiliations[4:]
        countries = countries[4:]
        abstract = ''
        for info in data['abstract']:
            abstract = abstract + ' ' + info['text']
        abstract = abstract.strip()
        body_text = ''
        for info in data['body_text']:
            body_text = body_text + ' ' + info['text']
        body_text = body_text.strip()
        bib_titles, bib_authors, years, venues = ('', '', '', '')
        for bib in data['bib_entries']:
            bib_titles = bib_titles + ' || ' + data['bib_entries'][bib]['title']
            year = data['bib_entries'][bib]['year']
            years = years + ' || ' + str(year)
            venue = data['bib_entries'][bib]['venue']
            venues = venues + ' || ' + venue
            bib_author = [author['first'] + ' ' + author['last'] for author in data['bib_entries'][bib]['authors']]
            bib_author = ' | '.join(bib_author)
            bib_authors = bib_authors + ' || ' + bib_author
        bib_titles, bib_authors, years, venues = (bib_titles[4:], bib_authors[4:], years[4:], venues[4:])
        papers_df.loc[line, :] = [paper_id, title, authors, abstract, body_text, bib_titles, dir]
        authors_list = authors.split(' || ')
        affiliations_list = affiliations.split(' || ')
        for i in range(len(authors_list)):
            authors_df.loc[line_author_df, :] = (authors_list[i], affiliations_list[i])
            line_author_df += 1
        countries_list = countries.split(' || ')
        for i in range(len(affiliations_list)):
            affiliations_df.loc[line_affiliations_df, :] = (affiliations_list[i], countries_list[i])
            line_affiliations_df += 1
        bib_titles_list = bib_titles.split(' || ')
        bib_authors_list = bib_authors.split(' || ')
        years_list = years.split(' || ')
        venues_list = venues.split(' || ')
        for i in range(len(bib_titles_list)):
            bib_entries_df.loc[line_bib_entries_df, :] = (bib_titles_list[i], bib_authors_list[i], years_list[i], venues_list[i])
            line_bib_entries_df += 1
    authors_df = authors_df.drop_duplicates().reset_index(drop=True)
    affiliations_df = affiliations_df.drop_duplicates().reset_index(drop=True)
    bib_entries_df = bib_entries_df.drop_duplicates().reset_index(drop=True)
    return (papers_df, authors_df, affiliations_df, bib_entries_df)

df = pd.read_csv('/kaggle/input/papers/papers.csv', sep=';', nrows=100)
df = df.drop_duplicates()
df.shape
code
32068380/cell_43
[ "text_html_output_1.png" ]
import pandas as pd

def get_data(dir_path):
    """
    Take as input a directory path containing json files from biorxiv_medrxiv,
    comm_use_subset, noncomm_use_subset or custom_license.
    Four dataframes are returned: papers_df, authors_df, affiliations_df, bib_entries_df
    """
    files = os.listdir(dir_path)
    papers_df = pd.DataFrame(columns=['paper_id', 'title', 'authors', 'abstract', 'body_text', 'bib_titles', 'dataset'])
    authors_df = pd.DataFrame(columns=['author', 'affiliation'])
    affiliations_df = pd.DataFrame(columns=['affiliation', 'country'])
    bib_entries_df = pd.DataFrame(columns=['title', 'authors', 'year', 'venue'])
    line_author_df = 0
    line_affiliations_df = 0
    line_bib_entries_df = 0
    for line, file in enumerate(files):
        n_files = len(files)
        file_path = os.path.join('../data/{}'.format(dir), file)
        with open(file_path) as f:
            data = json.load(f)
        paper_id = data['paper_id']
        title = data['metadata']['title']
        authors, affiliations, countries = ('', '', '')
        for author in data['metadata']['authors']:
            first_last_name = author['first'] + ' ' + author['last']
            authors = authors + ' || ' + first_last_name
            if author['affiliation'] == {}:
                affiliation = 'NA'
                affiliations = affiliations + ' || ' + affiliation.strip()
                country = 'NA'
                countries = countries + ' || ' + country.strip()
                continue
            affiliation = author['affiliation']['laboratory'] + ' ' + author['affiliation']['institution']
            affiliations = affiliations + ' || ' + affiliation.strip()
            if 'country' not in author['affiliation']['location'].keys():
                country = 'NA'
                countries = countries + ' || ' + country
                continue
            country = author['affiliation']['location']['country']
            countries = countries + ' || ' + country
        authors = authors[4:]
        affiliations = affiliations[4:]
        countries = countries[4:]
        abstract = ''
        for info in data['abstract']:
            abstract = abstract + ' ' + info['text']
        abstract = abstract.strip()
        body_text = ''
        for info in data['body_text']:
            body_text = body_text + ' ' + info['text']
        body_text = body_text.strip()
        bib_titles, bib_authors, years, venues = ('', '', '', '')
        for bib in data['bib_entries']:
            bib_titles = bib_titles + ' || ' + data['bib_entries'][bib]['title']
            year = data['bib_entries'][bib]['year']
            years = years + ' || ' + str(year)
            venue = data['bib_entries'][bib]['venue']
            venues = venues + ' || ' + venue
            bib_author = [author['first'] + ' ' + author['last'] for author in data['bib_entries'][bib]['authors']]
            bib_author = ' | '.join(bib_author)
            bib_authors = bib_authors + ' || ' + bib_author
        bib_titles, bib_authors, years, venues = (bib_titles[4:], bib_authors[4:], years[4:], venues[4:])
        papers_df.loc[line, :] = [paper_id, title, authors, abstract, body_text, bib_titles, dir]
        authors_list = authors.split(' || ')
        affiliations_list = affiliations.split(' || ')
        for i in range(len(authors_list)):
            authors_df.loc[line_author_df, :] = (authors_list[i], affiliations_list[i])
            line_author_df += 1
        countries_list = countries.split(' || ')
        for i in range(len(affiliations_list)):
            affiliations_df.loc[line_affiliations_df, :] = (affiliations_list[i], countries_list[i])
            line_affiliations_df += 1
        bib_titles_list = bib_titles.split(' || ')
        bib_authors_list = bib_authors.split(' || ')
        years_list = years.split(' || ')
        venues_list = venues.split(' || ')
        for i in range(len(bib_titles_list)):
            bib_entries_df.loc[line_bib_entries_df, :] = (bib_titles_list[i], bib_authors_list[i], years_list[i], venues_list[i])
            line_bib_entries_df += 1
    authors_df = authors_df.drop_duplicates().reset_index(drop=True)
    affiliations_df = affiliations_df.drop_duplicates().reset_index(drop=True)
    bib_entries_df = bib_entries_df.drop_duplicates().reset_index(drop=True)
    return (papers_df, authors_df, affiliations_df, bib_entries_df)

df = pd.read_csv('/kaggle/input/papers/papers.csv', sep=';', nrows=100)
meta_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
meta_df.head()
code
32068380/cell_24
[ "image_output_1.png" ]
from IPython.display import Image

Image('../input/images-search-engine-cord19/sentences.PNG')
code
32068380/cell_22
[ "image_output_1.png" ]
from IPython.display import Image

Image('../input/images-search-engine-cord19/sentences.PNG')
code
32068380/cell_37
[ "text_html_output_1.png" ]
import pandas as pd

def get_data(dir_path):
    """
    Take as input a directory path containing json files from biorxiv_medrxiv,
    comm_use_subset, noncomm_use_subset or custom_license.
    Four dataframes are returned: papers_df, authors_df, affiliations_df, bib_entries_df
    """
    files = os.listdir(dir_path)
    papers_df = pd.DataFrame(columns=['paper_id', 'title', 'authors', 'abstract', 'body_text', 'bib_titles', 'dataset'])
    authors_df = pd.DataFrame(columns=['author', 'affiliation'])
    affiliations_df = pd.DataFrame(columns=['affiliation', 'country'])
    bib_entries_df = pd.DataFrame(columns=['title', 'authors', 'year', 'venue'])
    line_author_df = 0
    line_affiliations_df = 0
    line_bib_entries_df = 0
    for line, file in enumerate(files):
        n_files = len(files)
        file_path = os.path.join('../data/{}'.format(dir), file)
        with open(file_path) as f:
            data = json.load(f)
        paper_id = data['paper_id']
        title = data['metadata']['title']
        authors, affiliations, countries = ('', '', '')
        for author in data['metadata']['authors']:
            first_last_name = author['first'] + ' ' + author['last']
            authors = authors + ' || ' + first_last_name
            if author['affiliation'] == {}:
                affiliation = 'NA'
                affiliations = affiliations + ' || ' + affiliation.strip()
                country = 'NA'
                countries = countries + ' || ' + country.strip()
                continue
            affiliation = author['affiliation']['laboratory'] + ' ' + author['affiliation']['institution']
            affiliations = affiliations + ' || ' + affiliation.strip()
            if 'country' not in author['affiliation']['location'].keys():
                country = 'NA'
                countries = countries + ' || ' + country
                continue
            country = author['affiliation']['location']['country']
            countries = countries + ' || ' + country
        authors = authors[4:]
        affiliations = affiliations[4:]
        countries = countries[4:]
        abstract = ''
        for info in data['abstract']:
            abstract = abstract + ' ' + info['text']
        abstract = abstract.strip()
        body_text = ''
        for info in data['body_text']:
            body_text = body_text + ' ' + info['text']
        body_text = body_text.strip()
        bib_titles, bib_authors, years, venues = ('', '', '', '')
        for bib in data['bib_entries']:
            bib_titles = bib_titles + ' || ' + data['bib_entries'][bib]['title']
            year = data['bib_entries'][bib]['year']
            years = years + ' || ' + str(year)
            venue = data['bib_entries'][bib]['venue']
            venues = venues + ' || ' + venue
            bib_author = [author['first'] + ' ' + author['last'] for author in data['bib_entries'][bib]['authors']]
            bib_author = ' | '.join(bib_author)
            bib_authors = bib_authors + ' || ' + bib_author
        bib_titles, bib_authors, years, venues = (bib_titles[4:], bib_authors[4:], years[4:], venues[4:])
        papers_df.loc[line, :] = [paper_id, title, authors, abstract, body_text, bib_titles, dir]
        authors_list = authors.split(' || ')
        affiliations_list = affiliations.split(' || ')
        for i in range(len(authors_list)):
            authors_df.loc[line_author_df, :] = (authors_list[i], affiliations_list[i])
            line_author_df += 1
        countries_list = countries.split(' || ')
        for i in range(len(affiliations_list)):
            affiliations_df.loc[line_affiliations_df, :] = (affiliations_list[i], countries_list[i])
            line_affiliations_df += 1
        bib_titles_list = bib_titles.split(' || ')
        bib_authors_list = bib_authors.split(' || ')
        years_list = years.split(' || ')
        venues_list = venues.split(' || ')
        for i in range(len(bib_titles_list)):
            bib_entries_df.loc[line_bib_entries_df, :] = (bib_titles_list[i], bib_authors_list[i], years_list[i], venues_list[i])
            line_bib_entries_df += 1
    authors_df = authors_df.drop_duplicates().reset_index(drop=True)
    affiliations_df = affiliations_df.drop_duplicates().reset_index(drop=True)
    bib_entries_df = bib_entries_df.drop_duplicates().reset_index(drop=True)
    return (papers_df, authors_df, affiliations_df, bib_entries_df)

df = pd.read_csv('/kaggle/input/papers/papers.csv', sep=';', nrows=100)
df.head()
code
1010064/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_json('../input/train.json', typ='frame')
test_df = pd.read_json('../input/test.json', typ='frame')
print(train_df.shape)
print('----------')
print(test_df.shape)
code
1010064/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
1010064/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train_df = pd.read_json('../input/train.json', typ='frame')
test_df = pd.read_json('../input/test.json', typ='frame')
train_df.head()
code
1010064/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train_df = pd.read_json('../input/train.json', typ='frame')
test_df = pd.read_json('../input/test.json', typ='frame')
train_df.info()
print('-------------------')
test_df.info()
code
73065927/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.dtypes
df.isnull().sum()
n = 0
for x in ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']:
    n += 1
df.columns
code
73065927/cell_4
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.head()
code
73065927/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.dtypes
df.isnull().sum()
n = 0
for x in ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']:
    n += 1
sns.heatmap(df.corr())
code
73065927/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73065927/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.dtypes
code
73065927/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.dtypes
df.isnull().sum()
code
73065927/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.dtypes
df.isnull().sum()
n = 0
for x in ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']:
    n += 1
df.columns
x = df['']
code
73065927/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.dtypes
df.isnull().sum()
plt.figure(1, figsize=(15, 6))
n = 0
for x in ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']:
    n += 1
    plt.subplot(1, 3, n)
    sns.distplot(df[x], bins=20)
    plt.title('Distplot of {}'.format(x))
plt.show()
code
73065927/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv')
df.describe()
code