Columns:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class: "code")
89136278/cell_10
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
imsize = (512, 220) if torch.cuda.is_available() else (128, 220)
loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])

def image_loader(image_name):
    image = Image.open(image_name)
    image = loader(image).unsqueeze(0)
    return image.to(device, torch.float)

style_img = image_loader('./style.jpg')
content_img = image_loader('./content.jpg')
assert style_img.size() == content_img.size(), 'we need to import style and content images of the same size'

unloader = transforms.ToPILImage()
plt.ion()

def imshow(tensor, title=None):
    image = tensor.cpu().clone()
    image = image.squeeze(0)
    image = unloader(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)

plt.figure()
imshow(style_img, title='Style Image')
plt.figure()
imshow(content_img, title='Content Image')
code
89136278/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
!wget -O style.jpg "https://cdn.britannica.com/89/196489-138-8770A1D5/Vincent-van-Gogh-life-work.jpg"
!wget -O content.jpg "https://www.indiewire.com/wp-content/uploads/2016/08/big-totoro-e1538413562225.jpeg"
code
2032991/cell_9
[ "text_plain_output_1.png" ]
import re
import nltk

null_text = X.comment_text[2]
X.shape
X.isnull().sum()
y = X[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']]
try:
    X.drop(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'], axis=1, inplace=True)
except:
    pass

stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

X.comment_text = X.comment_text.apply(lambda x: preprocess_input(x))
X.head()
code
2032991/cell_4
[ "text_plain_output_1.png" ]
null_text = X.comment_text[2]
X.shape
X.isnull().sum()
code
2032991/cell_11
[ "text_plain_output_1.png" ]
import re
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer

null_text = X.comment_text[2]
X.shape
X.isnull().sum()
y = X[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']]
try:
    X.drop(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'], axis=1, inplace=True)
except:
    pass

stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

X.comment_text = X.comment_text.apply(lambda x: preprocess_input(x))
vect = TfidfVectorizer(min_df=5, max_df=0.7, ngram_range=(1, 2), strip_accents='unicode', smooth_idf=True, sublinear_tf=True, max_features=10000)
vect = vect.fit(X['comment_text'])
X_vect = vect.transform(X['comment_text'])
X_vect.shape
code
2032991/cell_19
[ "text_html_output_1.png" ]
import re
import nltk
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss

null_text = X.comment_text[2]
X.shape
X.isnull().sum()
y = X[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']]
try:
    X.drop(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'], axis=1, inplace=True)
except:
    pass

stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

X.comment_text = X.comment_text.apply(lambda x: preprocess_input(x))
vect = TfidfVectorizer(min_df=5, max_df=0.7, ngram_range=(1, 2), strip_accents='unicode', smooth_idf=True, sublinear_tf=True, max_features=10000)
vect = vect.fit(X['comment_text'])
X_vect = vect.transform(X['comment_text'])
X_vect.shape
test = pd.read_csv('../input/test.csv')
test.fillna(value=null_text, inplace=True)
t_id = test['id']
test.drop(['id'], axis=1, inplace=True)
test.comment_text = test.comment_text.apply(lambda z: preprocess_input(z))
X_test = vect.transform(test['comment_text'])
X_test.shape
y.iloc[:, 1]
cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y_pred = pd.read_csv('../input/sample_submission.csv')
for c in cols:
    clf = LogisticRegression(C=4, solver='sag')
    clf.fit(X_vect, y[c])
    y_pred[c] = clf.predict_proba(X_test)[:, 1]
    pred_train = clf.predict_proba(X_vect)[:, 1]
    print('log loss:', log_loss(y[c], pred_train))
code
2032991/cell_1
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

X = pd.read_csv('../input/train.csv')
X.head()
code
2032991/cell_7
[ "text_plain_output_1.png" ]
import re
import nltk

null_text = X.comment_text[2]
stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

preprocess_input(null_text)
code
2032991/cell_18
[ "text_html_output_1.png" ]
null_text = X.comment_text[2]
X.shape
X.isnull().sum()
y = X[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']]
try:
    X.drop(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'], axis=1, inplace=True)
except:
    pass
y.iloc[:, 1]
code
2032991/cell_15
[ "text_html_output_1.png" ]
import re
import nltk
import pandas as pd

null_text = X.comment_text[2]
stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

test = pd.read_csv('../input/test.csv')
test.fillna(value=null_text, inplace=True)
t_id = test['id']
test.drop(['id'], axis=1, inplace=True)
test.comment_text = test.comment_text.apply(lambda z: preprocess_input(z))
len(test)
code
2032991/cell_3
[ "text_plain_output_1.png" ]
null_text = X.comment_text[2]
X.shape
code
2032991/cell_17
[ "text_plain_output_1.png" ]
import re
import nltk
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

null_text = X.comment_text[2]
X.shape
X.isnull().sum()
y = X[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']]
try:
    X.drop(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'], axis=1, inplace=True)
except:
    pass

stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

X.comment_text = X.comment_text.apply(lambda x: preprocess_input(x))
vect = TfidfVectorizer(min_df=5, max_df=0.7, ngram_range=(1, 2), strip_accents='unicode', smooth_idf=True, sublinear_tf=True, max_features=10000)
vect = vect.fit(X['comment_text'])
X_vect = vect.transform(X['comment_text'])
test = pd.read_csv('../input/test.csv')
test.fillna(value=null_text, inplace=True)
t_id = test['id']
test.drop(['id'], axis=1, inplace=True)
test.comment_text = test.comment_text.apply(lambda z: preprocess_input(z))
X_test = vect.transform(test['comment_text'])
X_test.shape
code
2032991/cell_14
[ "text_plain_output_1.png" ]
import re
import nltk
import pandas as pd

null_text = X.comment_text[2]
stop_words = set(nltk.corpus.stopwords.words('english'))

def preprocess_input(t):
    t = t.strip()
    z = re.findall('[A-Za-z]+', t)
    z = [a for a in z if len(a) > 3]
    wnlemma = nltk.stem.WordNetLemmatizer()
    z = [wnlemma.lemmatize(a) for a in z]
    z = [a for a in z if a not in stop_words]
    t = ' '.join(z)
    return t

test = pd.read_csv('../input/test.csv')
test.fillna(value=null_text, inplace=True)
t_id = test['id']
test.drop(['id'], axis=1, inplace=True)
test.comment_text = test.comment_text.apply(lambda z: preprocess_input(z))
test.head()
code
2032991/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

null_text = X.comment_text[2]
test = pd.read_csv('../input/test.csv')
test.fillna(value=null_text, inplace=True)
test.head()
code
88102456/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the new columns below are written to an independent frame, not a view of df
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
dfErrorsOrg = df_new[df_new['errorbalanceOrg'] != 0]
dfErrorsOrg[['errorbalanceOrg', 'isFraud']].groupby('isFraud').count()
dfErrorsOrg = df_new[df_new['errorbalanceOrg'] == 0]
dfErrorsOrg[['errorbalanceOrg', 'isFraud']].groupby('isFraud').count()
code
88102456/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
frauds = df[df['isFraud'] == 1]
frauds['type'].unique()
code
88102456/cell_25
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the new columns below are written to an independent frame, not a view of df
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
df_new['hour_of_day'] = df_new['step'] % 24
code
88102456/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the new columns below are written to an independent frame, not a view of df
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
dfErrorsOrg = df_new[df_new['errorbalanceOrg'] != 0]
dfErrorsOrg[['errorbalanceOrg', 'isFraud']].groupby('isFraud').count()
code
88102456/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
code
88102456/cell_19
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the new columns below are written to an independent frame, not a view of df
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
code
88102456/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88102456/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
first17 = df[df['step'] < 17 * 24]
fraudcount = first17[first17['isFraud'] == 1].count()
totalcount = first17.count()
fraudcount / totalcount
code
88102456/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_theme(style='darkgrid')
print(df.type.value_counts())
f, ax = plt.subplots(1, 1, figsize=(8, 8))
df.type.value_counts().plot(kind='bar', title='Transaction type', ax=ax, figsize=(8, 8))
plt.ticklabel_format(style='plain', axis='y')
for p in ax.patches:
    ax.annotate(str(format(int(p.get_height()), ',d')), (p.get_x(), p.get_height()))
plt.show()
code
88102456/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the new columns below are written to an independent frame, not a view of df
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
dfErrorsDest = df_new[df_new['errorbalanceDest'] != 0]
dfErrorsDest[['errorbalanceDest', 'isFraud']].groupby('isFraud').count()
dfErrorsDest = df_new[df_new['errorbalanceDest'] == 0]
dfErrorsDest[['errorbalanceDest', 'isFraud']].groupby('isFraud').count()
code
88102456/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
incorrectlyFlaggedFraud = df[(df['isFlaggedFraud'] == 1) & (df['isFraud'] == 0)]
incorrectlyFlaggedFraud['type'].unique()
code
88102456/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the new columns below are written to an independent frame, not a view of df
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
dfErrorsDest = df_new[df_new['errorbalanceDest'] != 0]
dfErrorsDest[['errorbalanceDest', 'isFraud']].groupby('isFraud').count()
code
88102456/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
# .copy() so the 'day' column below is written to an independent frame, not a view of df
frauds = df[df['isFraud'] == 1].copy()
frauds['type'].unique()
frauds['day'] = round(frauds['step'] / 24)
frauds_by_day = frauds[['step', 'day']].groupby('day').count()
frauds_by_day = frauds_by_day.rename(columns={'step': 'count'})
frauds_by_day.plot(kind='bar', figsize=(10, 5), title='Number of frauds per day')
code
88102456/cell_27
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
df_new = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')].copy()
df_new['errorbalanceOrg'] = df_new.newbalanceOrig + df_new.amount - df_new.oldbalanceOrg
df_new['errorbalanceDest'] = df_new.oldbalanceDest + df_new.amount - df_new.newbalanceDest
# recreate the hour_of_day feature from cell_25; grouping a bare Series by a column name would fail
df_new['hour_of_day'] = df_new['step'] % 24
df_new.groupby('hour_of_day')['isFraud'].count()
code
88102456/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
correctlyFlaggedFraud = df[(df['isFlaggedFraud'] == 1) & (df['isFraud'] == 1)]
correctlyFlaggedFraud['type'].unique()
code
88102456/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/paysim1/PS_20174392719_1491204439457_log.csv')
df.columns
fraudcount = df[df['isFraud'] == 1].count()
totalcount = df.count()
fraudcount / totalcount
code
32068294/cell_21
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.drop('Dummy', axis=1, inplace=True)
df.drop('Formatted Date', axis=1, inplace=True)
df.head(2)
code
32068294/cell_13
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
df.head(5)
code
32068294/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df.describe()
code
32068294/cell_23
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.drop('Dummy', axis=1, inplace=True)
df.drop('Formatted Date', axis=1, inplace=True)
df.iloc[:, 9].value_counts()
df.iloc[:, 9].nunique()
code
32068294/cell_20
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.drop('Dummy', axis=1, inplace=True)
df.head(2)
code
32068294/cell_26
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.drop('Dummy', axis=1, inplace=True)
df.drop('Formatted Date', axis=1, inplace=True)
df.iloc[:, 9].value_counts()
df.iloc[:, 9].nunique()
df.isnull().sum(axis=0)
code
32068294/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.head(4)
code
32068294/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068294/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
code
32068294/cell_8
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
code
32068294/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.head(3)
code
32068294/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
code
32068294/cell_22
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.drop('Dummy', axis=1, inplace=True)
df.drop('Formatted Date', axis=1, inplace=True)
df.iloc[:, 9].value_counts()
code
32068294/cell_10
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
code
32068294/cell_27
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
summary_dict = {'Partly Cloudy': 0, 'Mostly Cloudy': 1, 'Overcast': 2, 'Clear': 3, 'Foggy': 4, 'Breezy': 5, 'Windy': 6, 'Dry': 7, 'Rain': 8, 'Humid': 9}
df.shape[0]
df = df.replace({'Summary': summary_dict})
cols_to_transform = ['Precip Type']
df_with_dummies = pd.get_dummies(df, columns=cols_to_transform, drop_first=True)
df = df_with_dummies
df.drop('Dummy', axis=1, inplace=True)
df.drop('Formatted Date', axis=1, inplace=True)
df.iloc[:, 9].value_counts()
df.iloc[:, 9].nunique()
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
code
32068294/cell_12
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum(axis=0)
df = df.dropna(how='any', axis=0)
df.isnull().sum(axis=0)
df.iloc[:, 1].value_counts()
df.shape[0]
code
32065291/cell_13
[ "text_plain_output_1.png" ]
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import seaborn

plt.style.use('dark_background')
warnings.filterwarnings('ignore')

train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_data.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_data.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_data.rename(columns={'Province_State': 'Province'}, inplace=True)
test_data.rename(columns={'Province_State': 'Province'}, inplace=True)
india = train_data.loc[train_data['Country'] == 'India']
plt.xticks(rotation=90)
confirmed = train_data.groupby('Date').sum()['ConfirmedCases'].reset_index()
deaths = train_data.groupby('Date').sum()['Fatalities'].reset_index()
plt.xticks(rotation=90)
train_data['Date'] = pd.to_datetime(train_data['Date'])
test_data['Date'] = pd.to_datetime(test_data['Date'])
train_data['Province'] = train_data.apply(lambda row: str(row['Country']) if pd.isnull(row['Province']) else row['Province'], axis=1)
test_data['Province'] = test_data.apply(lambda row: str(row['Country']) if pd.isnull(row['Province']) else row['Province'], axis=1)
train_data.drop(['Id'], axis=1, inplace=True)
train_data.head()
code
32065291/cell_4
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
print(train_data.shape)
print(test_data.shape)
code
32065291/cell_6
[ "image_output_1.png" ]
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import seaborn

plt.style.use('dark_background')
warnings.filterwarnings('ignore')

train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_data.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_data.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_data.rename(columns={'Province_State': 'Province'}, inplace=True)
test_data.rename(columns={'Province_State': 'Province'}, inplace=True)
india = train_data.loc[train_data['Country'] == 'India']
plt.figure(figsize=(20, 10))
plt.bar(india.Date, india.ConfirmedCases)
plt.bar(india.Date, india.Fatalities)
plt.title('INDIA Circumstances')
plt.xticks(rotation=90)
plt.show()
code
32065291/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32065291/cell_7
[ "text_html_output_1.png" ]
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import seaborn

plt.style.use('dark_background')
warnings.filterwarnings('ignore')

train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train_data.rename(columns={'Country_Region': 'Country'}, inplace=True)
test_data.rename(columns={'Country_Region': 'Country'}, inplace=True)
train_data.rename(columns={'Province_State': 'Province'}, inplace=True)
test_data.rename(columns={'Province_State': 'Province'}, inplace=True)
india = train_data.loc[train_data['Country'] == 'India']
plt.xticks(rotation=90)
confirmed = train_data.groupby('Date').sum()['ConfirmedCases'].reset_index()
deaths = train_data.groupby('Date').sum()['Fatalities'].reset_index()
plt.figure(figsize=(22, 9))
plt.bar(confirmed['Date'], confirmed['ConfirmedCases'])
plt.title('World Circumstances')
plt.bar(deaths['Date'], deaths['Fatalities'])
plt.xticks(rotation=90)
plt.show()
code
18114582/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
work_data = pd.concat([data['cnt'], data['workingday']], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x='workingday', y='cnt', data=work_data)
fig.axis(ymin=0, ymax=1000);
# hypothesis was valid: the overall mean for accidents on a working day is higher than on non-working days
week_data = pd.concat([data['cnt'], data['weekday']], axis=1)
fig = sns.boxplot(x='weekday', y='cnt', data=week_data)
fig.axis(ymin=0, ymax=1000)
code
18114582/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
sns.distplot(data['cnt'])
code
18114582/cell_6
[ "image_output_1.png" ]
import pandas as pd

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
code
18114582/cell_7
[ "image_output_1.png" ]
import pandas as pd

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
data.describe()
code
18114582/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
work_data = pd.concat([data['cnt'], data['workingday']], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x='workingday', y='cnt', data=work_data)
fig.axis(ymin=0, ymax=1000);
# hypothesis was valid: the overall mean for accidents on a working day is higher than on non-working days
week_data = pd.concat([data['cnt'], data['weekday']], axis=1)
fig = sns.boxplot(x='weekday', y='cnt', data=week_data)
fig.axis(ymin=0, ymax=1000);
# hypothesis was valid: the overall mean for accidents on a working day is higher than on non-working days
sns.catplot(x='weathersit', y='cnt', hue='yr', data=data, height=6, kind='bar', palette='muted')
code
18114582/cell_3
[ "image_output_1.png" ]
import pandas as pd

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
data.head(5)
code
18114582/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
print('Skewness: %f' % data['cnt'].skew())
print('Kurtosis: %f' % data['cnt'].kurt())
code
18114582/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
sum(data.duplicated(subset='index')) == 0
work_data = pd.concat([data['cnt'], data['workingday']], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x='workingday', y='cnt', data=work_data)
fig.axis(ymin=0, ymax=1000)
code
18114582/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.DataFrame(pd.read_csv('../input/motorbike_ambulance_calls.csv'))
data.info()
code
88095865/cell_4
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('taxi').getOrCreate()
code
88095865/cell_29
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.ml.regression import GBTRegressor

si = StringIndexer(inputCol='store_and_fwd_flag', outputCol='store_and_fwd_flag_si', handleInvalid='skip')
va = VectorAssembler(inputCols=['VendorID', 'day', 'hour', 'passenger_count', 'trip_distance', 'RateCodeID', 'store_and_fwd_flag_si', 'PULocationID', 'DOLocationID', 'payment_type'], outputCol='features')
gbt = GBTRegressor(featuresCol='features', labelCol='total_amount', maxDepth=5, maxIter=5, seed=42)
pipeline = Pipeline(stages=[si, va, gbt])
model = pipeline.fit(train)
predictions = model.transform(test)
predictions.select('prediction', 'total_amount', 'features').show(5)
code
88095865/cell_26
[ "text_plain_output_1.png" ]
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.ml.regression import GBTRegressor

si = StringIndexer(inputCol='store_and_fwd_flag', outputCol='store_and_fwd_flag_si', handleInvalid='skip')
va = VectorAssembler(inputCols=['VendorID', 'day', 'hour', 'passenger_count', 'trip_distance', 'RateCodeID', 'store_and_fwd_flag_si', 'PULocationID', 'DOLocationID', 'payment_type'], outputCol='features')
gbt = GBTRegressor(featuresCol='features', labelCol='total_amount', maxDepth=5, maxIter=5, seed=42)
pipeline = Pipeline(stages=[si, va, gbt])
model = pipeline.fit(train)
code
88095865/cell_2
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
# Installing pyspark
!pip install pyspark
code
88095865/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, IntegerType, StructType, StructField, TimestampType, DoubleType

spark = SparkSession.builder.appName('taxi').getOrCreate()
schema = StructType([
    StructField('VendorID', IntegerType(), True),
    StructField('tpep_pickup_datetime', TimestampType(), True),
    StructField('tpep_dropoff_datetime', TimestampType(), True),
    StructField('passenger_count', IntegerType(), True),
    StructField('trip_distance', DoubleType(), True),
    StructField('RatecodeID', IntegerType(), True),
    StructField('store_and_fwd_flag', StringType(), True),
    StructField('PULocationID', IntegerType(), True),
    StructField('DOLocationID', IntegerType(), True),
    StructField('payment_type', IntegerType(), True),
    StructField('fare_amount', DoubleType(), True),
    StructField('extra', DoubleType(), True),
    StructField('mta_tax', DoubleType(), True),
    StructField('tip_amount', DoubleType(), True),
    StructField('tolls_amount', IntegerType(), True),
    StructField('improvement_surcharge', DoubleType(), True),
    StructField('total_amount', DoubleType(), True),
    StructField('congestion_surcharge', DoubleType(), True),
])
df = spark.read.csv('/kaggle/input/newyork-yellow-taxi-trip-data-2020-2019/*2020*', schema=schema, header=True)
len(df.columns)
df.show(5)
code
88095865/cell_19
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, IntegerType, StructType, StructField, TimestampType, DoubleType
import pyspark.sql.functions as F

spark = SparkSession.builder.appName('taxi').getOrCreate()
schema = StructType([
    StructField('VendorID', IntegerType(), True),
    StructField('tpep_pickup_datetime', TimestampType(), True),
    StructField('tpep_dropoff_datetime', TimestampType(), True),
    StructField('passenger_count', IntegerType(), True),
    StructField('trip_distance', DoubleType(), True),
    StructField('RatecodeID', IntegerType(), True),
    StructField('store_and_fwd_flag', StringType(), True),
    StructField('PULocationID', IntegerType(), True),
    StructField('DOLocationID', IntegerType(), True),
    StructField('payment_type', IntegerType(), True),
    StructField('fare_amount', DoubleType(), True),
    StructField('extra', DoubleType(), True),
    StructField('mta_tax', DoubleType(), True),
    StructField('tip_amount', DoubleType(), True),
    StructField('tolls_amount', IntegerType(), True),
    StructField('improvement_surcharge', DoubleType(), True),
    StructField('total_amount', DoubleType(), True),
    StructField('congestion_surcharge', DoubleType(), True),
])
df = spark.read.csv('/kaggle/input/newyork-yellow-taxi-trip-data-2020-2019/*2020*', schema=schema, header=True)
len(df.columns)
df = df.sample(fraction=0.1, seed=42)
df = df.withColumn('day', F.dayofmonth(df.tpep_pickup_datetime))
df = df.withColumn('hour', F.hour(df.tpep_pickup_datetime))
df = df.select('VendorID', 'day', 'hour', 'passenger_count', 'trip_distance', 'RateCodeID', 'store_and_fwd_flag', 'PULocationID', 'DOLocationID', 'payment_type', 'total_amount')
df.printSchema()
code
88095865/cell_31
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.ml.regression import GBTRegressor

si = StringIndexer(inputCol='store_and_fwd_flag', outputCol='store_and_fwd_flag_si', handleInvalid='skip')
va = VectorAssembler(inputCols=['VendorID', 'day', 'hour', 'passenger_count', 'trip_distance', 'RateCodeID', 'store_and_fwd_flag_si', 'PULocationID', 'DOLocationID', 'payment_type'], outputCol='features')
gbt = GBTRegressor(featuresCol='features', labelCol='total_amount', maxDepth=5, maxIter=5, seed=42)
pipeline = Pipeline(stages=[si, va, gbt])
model = pipeline.fit(train)
predictions = model.transform(test)
evaluator = RegressionEvaluator(labelCol='total_amount', predictionCol='prediction', metricName='rmse')
rmse = evaluator.evaluate(predictions)
print('Root Mean Squared Error (RMSE) on test data = %g' % rmse)
code
88095865/cell_10
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, IntegerType, StructType, StructField, TimestampType, DoubleType

spark = SparkSession.builder.appName('taxi').getOrCreate()
schema = StructType([
    StructField('VendorID', IntegerType(), True),
    StructField('tpep_pickup_datetime', TimestampType(), True),
    StructField('tpep_dropoff_datetime', TimestampType(), True),
    StructField('passenger_count', IntegerType(), True),
    StructField('trip_distance', DoubleType(), True),
    StructField('RatecodeID', IntegerType(), True),
    StructField('store_and_fwd_flag', StringType(), True),
    StructField('PULocationID', IntegerType(), True),
    StructField('DOLocationID', IntegerType(), True),
    StructField('payment_type', IntegerType(), True),
    StructField('fare_amount', DoubleType(), True),
    StructField('extra', DoubleType(), True),
    StructField('mta_tax', DoubleType(), True),
    StructField('tip_amount', DoubleType(), True),
    StructField('tolls_amount', IntegerType(), True),
    StructField('improvement_surcharge', DoubleType(), True),
    StructField('total_amount', DoubleType(), True),
    StructField('congestion_surcharge', DoubleType(), True),
])
df = spark.read.csv('/kaggle/input/newyork-yellow-taxi-trip-data-2020-2019/*2020*', schema=schema, header=True)
len(df.columns)
code
105199619/cell_6
[ "text_plain_output_1.png" ]
!pip uninstall -q -y transformers
code
89124318/cell_21
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression

regressor = LinearRegression()
regressor.fit(X_train, y_train)
code
89124318/cell_34
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error  # import needed for the score below

regressor1 = LinearRegression()
regressor1.fit(X_train1, y_train1)
y_pred1 = regressor1.predict(X_test1)
mean_squared_error(y_test1, y_pred1)
code
89124318/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
code
89124318/cell_33
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
# imports needed for the scores below
from sklearn.metrics import (explained_variance_score, max_error, mean_absolute_error,
                             mean_squared_error, mean_squared_log_error, median_absolute_error,
                             r2_score, mean_poisson_deviance, mean_gamma_deviance,
                             mean_tweedie_deviance)

regressor1 = LinearRegression()
regressor1.fit(X_train1, y_train1)
y_pred1 = regressor1.predict(X_test1)
print(mean_squared_error(y_test1, y_pred1),
      explained_variance_score(y_test1, y_pred1),
      max_error(y_test1, y_pred1),
      mean_absolute_error(y_test1, y_pred1),
      mean_squared_error(y_test1, y_pred1),
      mean_squared_log_error(y_test1, y_pred1),
      median_absolute_error(y_test1, y_pred1),
      r2_score(y_test1, y_pred1),
      mean_poisson_deviance(y_test1, y_pred1),
      mean_gamma_deviance(y_test1, y_pred1),
      mean_tweedie_deviance(y_test1, y_pred1, power=1))
code
89124318/cell_29
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression

regressor1 = LinearRegression()
regressor1.fit(X_train1, y_train1)
code
89124318/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train1)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y_train1)
plt.scatter(X_test1, y_test1, color='red')
plt.plot(X_test1, lin_reg_2.predict(poly_reg.fit_transform(X_test1)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position_level')
plt.ylabel('Salary')
plt.show()
code
89124318/cell_26
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error  # import needed for the score below

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
mean_squared_error(y_test, y_pred)
code
89124318/cell_41
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error  # import needed for the score below
from sklearn.preprocessing import PolynomialFeatures

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train1)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y_train1)
valtopredictn2 = y_test1.reshape(-1, 1)
y_pred3 = lin_reg_2.predict(poly_reg.fit_transform(valtopredictn2))
mean_squared_error(y_test1, y_pred3)
code
89124318/cell_19
[ "text_plain_output_1.png" ]
print('Size of Xtrain', X_train.shape)
print('Length of ytrain', len(y_train))
print('Size of Xtest', X_test.shape)
print('Length of ytest', len(y_test))
code
89124318/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89124318/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/linear-regression-dataset/Linear Regression - Sheet1.csv')
df.head()
code
89124318/cell_45
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error  # import needed for the score below
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train1)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y_train1)
valtopredictn2 = y_test1.reshape(-1, 1)
y_pred3 = lin_reg_2.predict(poly_reg.fit_transform(valtopredictn2))
regressor2 = SVR(kernel='rbf')
regressor2.fit(X_train1, y_train1)
y_pred2 = regressor2.predict(valtopredictn2)
mean_squared_error(y_test1, y_pred2)
code
89124318/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/linear-regression-dataset/Linear Regression - Sheet1.csv')
df.info()
code
89124318/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train1)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y_train1)
plt.scatter(X_train1, y_train1, color='red')
plt.plot(X_train1, lin_reg_2.predict(poly_reg.fit_transform(X_train1)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position_level')
plt.ylabel('Salary')
plt.show()
code
89124318/cell_43
[ "image_output_1.png" ]
from sklearn.svm import SVR

regressor2 = SVR(kernel='rbf')
regressor2.fit(X_train1, y_train1)
code
89124318/cell_46
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train1)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y_train1)
valtopredictn2 = y_test1.reshape(-1, 1)
y_pred3 = lin_reg_2.predict(poly_reg.fit_transform(valtopredictn2))
regressor2 = SVR(kernel='rbf')
regressor2.fit(X_train1, y_train1)
y_pred2 = regressor2.predict(valtopredictn2)
plt.scatter(X_train1, y_train1, color='red')
plt.plot(X_train1, regressor2.predict(X_train1), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position_level')
plt.ylabel('Salary')
plt.show()
code
89124318/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
plt.scatter(X_test, y_test, color='red')
plt.plot(X_test, regressor.predict(X_test), color='blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
code
89124318/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('../input/linear-regression-dataset/Linear Regression - Sheet1.csv')
standard_scaler = StandardScaler()
standardized_data = standard_scaler.fit_transform(df)
pd.DataFrame(standardized_data, columns=df.columns)
code
89124318/cell_37
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train1)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y_train1)
code
89124318/cell_36
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

lin_reg = LinearRegression()
lin_reg.fit(X_train1, y_train1)
code
16148029/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
plt.figure()
plt.imshow(test_images[0].reshape(28, 28))
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.show()
code
16148029/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.figure()
# .to_numpy() replaces the long-deprecated .as_matrix(), which was removed in pandas 1.0
plt.imshow(train_images.iloc[0].to_numpy().reshape(28, 28))
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.show()
print(class_names[0])
code
16148029/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
code
16148029/cell_23
[ "text_plain_output_1.png" ]
# __future__ imports must precede all other imports
from __future__ import absolute_import, division, print_function, unicode_literals

import warnings
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
predictions = model.predict(test_images)
np.argmax(predictions[0])
plt.colorbar()
plt.xticks([])
plt.yticks([])
x = np.argmax(predictions[999])
plt.figure()
plt.imshow(test_images[999].reshape(28, 28))
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.show()
print(class_names[x])
code
16148029/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
print(test_labels[0])
print(class_names[0])
code
16148029/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/fashion-mnist_train.csv')
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
code
16148029/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
# __future__ imports must precede all other imports
from __future__ import absolute_import, division, print_function, unicode_literals

import warnings
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
predictions = model.predict(test_images)
np.argmax(predictions[0])
code
16148029/cell_1
[ "text_plain_output_1.png" ]
# __future__ imports must precede all other imports
from __future__ import absolute_import, division, print_function, unicode_literals

import warnings
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')
print(tf.__version__)
code
16148029/cell_16
[ "text_html_output_1.png" ]
# __future__ imports must precede all other imports
from __future__ import absolute_import, division, print_function, unicode_literals

import warnings
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
model.fit(train_images, train_labels, epochs=10)
code
16148029/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.head()
code
16148029/cell_17
[ "text_plain_output_1.png" ]
# __future__ imports must precede all other imports
from __future__ import absolute_import, division, print_function, unicode_literals

import warnings
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')

train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('The test accuracy is: {} and test loss is: {}'.format(test_acc, test_loss))
code