path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: "code") |
---|---|---|---|
2028270/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
wine_df = pd.read_csv('../input/winemag-data_first150k.csv')
wine_df.drop('region_1', axis=1, inplace=True)
wine_df.drop('region_2', axis=1, inplace=True)
wine_df.drop('description', axis=1, inplace=True)
wine_df.drop('designation', axis=1, inplace=True)
# The 'cheaper' and 'quality' flags are created in cell_12 of this notebook; recreate them so this cell runs standalone
wine_df['cheaper'] = 'no'
wine_df.loc[wine_df['price'] < 20.0, 'cheaper'] = 'yes'
wine_df['quality'] = 'no'
wine_df.loc[wine_df['points'] > 92, 'quality'] = 'yes'
newdf = wine_df[(wine_df['cheaper'] == 'yes') & (wine_df['quality'] == 'yes')]
newdf['variety'].value_counts().plot(kind='bar') | code |
2028270/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
wine_df = pd.read_csv('../input/winemag-data_first150k.csv')
wine_df.drop('region_1', axis=1, inplace=True)
wine_df.drop('region_2', axis=1, inplace=True)
wine_df.drop('description', axis=1, inplace=True)
wine_df.drop('designation', axis=1, inplace=True)
wine_df['reds'] = 'no'
# Flag the red varieties with one case-insensitive regex via .loc instead of 18 chained assignments
# (chained indexing like wine_df['reds'][mask] = 'yes' triggers SettingWithCopyWarning); na=False skips missing varieties
red_pattern = 'Red|Cabernet|Pinot Noir|Syrah|Malbec|Sangiovese|Merlot|Grenache|Shiraz|Pinotage|Monastrell|Tempranillo|Claret|Mourvèdre|Verdot|Dolcetto|Carmenère|G-S-M'
wine_df.loc[wine_df['variety'].str.contains(red_pattern, case=False, na=False), 'reds'] = 'yes' | code |
2028270/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
wine_df = pd.read_csv('../input/winemag-data_first150k.csv')
wine_df.drop('region_1', axis=1, inplace=True)
wine_df.drop('region_2', axis=1, inplace=True)
wine_df.drop('description', axis=1, inplace=True)
wine_df.drop('designation', axis=1, inplace=True)
# Recreate the 'cheaper' and 'quality' flags from cell_12 so this cell runs standalone
wine_df['cheaper'] = 'no'
wine_df.loc[wine_df['price'] < 20.0, 'cheaper'] = 'yes'
wine_df['quality'] = 'no'
wine_df.loc[wine_df['points'] > 92, 'quality'] = 'yes'
newdf = wine_df[(wine_df['cheaper'] == 'yes') & (wine_df['quality'] == 'yes')]
newdf.head() | code |
2028270/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
wine_df = pd.read_csv('../input/winemag-data_first150k.csv')
wine_df.drop('region_1', axis=1, inplace=True)
wine_df.drop('region_2', axis=1, inplace=True)
wine_df.drop('description', axis=1, inplace=True)
wine_df.drop('designation', axis=1, inplace=True)
# Recreate the 'cheaper', 'quality' and 'reds' flags from cells 12 and 17 so this cell runs standalone
wine_df['cheaper'] = 'no'
wine_df.loc[wine_df['price'] < 20.0, 'cheaper'] = 'yes'
wine_df['quality'] = 'no'
wine_df.loc[wine_df['points'] > 92, 'quality'] = 'yes'
wine_df['reds'] = 'no'
red_pattern = 'Red|Cabernet|Pinot Noir|Syrah|Malbec|Sangiovese|Merlot|Grenache|Shiraz|Pinotage|Monastrell|Tempranillo|Claret|Mourvèdre|Verdot|Dolcetto|Carmenère|G-S-M'
wine_df.loc[wine_df['variety'].str.contains(red_pattern, case=False, na=False), 'reds'] = 'yes'
red_df = wine_df[(wine_df['cheaper'] == 'yes') & (wine_df['quality'] == 'yes') & (wine_df['reds'] == 'yes')]
red_df | code |
2028270/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
wine_df = pd.read_csv('../input/winemag-data_first150k.csv')
wine_df.drop('region_1', axis=1, inplace=True)
wine_df.drop('region_2', axis=1, inplace=True)
wine_df.drop('description', axis=1, inplace=True)
wine_df.drop('designation', axis=1, inplace=True)
wine_df['cheaper'] = 'no'
wine_df.loc[wine_df['price'] < 20.0, 'cheaper'] = 'yes'
wine_df['quality'] = 'no'
wine_df.loc[wine_df['points'] > 92, 'quality'] = 'yes' | code |
2028270/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
wine_df = pd.read_csv('../input/winemag-data_first150k.csv')
plt.hist(wine_df['points'], bins=15, edgecolor='white') | code |
122260975/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
stop = stopwords.words('english')
stemmer = PorterStemmer()
def clean_text(text):
processed_text = re.sub('(@\\[A-Za-z0-9]+)|([^A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?', '', text)
processed_text = processed_text.lower()
processed_text = [word for word in processed_text.split() if word not in stop]
processed_text = ' '.join([stemmer.stem(word) for word in processed_text])
return processed_text
def preprocessing(df):
df.keyword.fillna('', inplace=True)
df.location.fillna('', inplace=True)
df.text = df.keyword + df.location + df.text
df.text = df.text.apply(lambda text: clean_text(text))
df.drop(columns=['keyword', 'location'], inplace=True)
return df
train_df = preprocessing(train_df)
test_df = preprocessing(test_df)
test_df.head() | code |
122260975/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
test_df.head() | code |
122260975/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import re
import nltk.corpus
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
122260975/cell_18 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
stop = stopwords.words('english')
stemmer = PorterStemmer()
def clean_text(text):
processed_text = re.sub('(@\\[A-Za-z0-9]+)|([^A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?', '', text)
processed_text = processed_text.lower()
processed_text = [word for word in processed_text.split() if word not in stop]
processed_text = ' '.join([stemmer.stem(word) for word in processed_text])
return processed_text
def preprocessing(df):
df.keyword.fillna('', inplace=True)
df.location.fillna('', inplace=True)
df.text = df.keyword + df.location + df.text
df.text = df.text.apply(lambda text: clean_text(text))
df.drop(columns=['keyword', 'location'], inplace=True)
return df
train_df = preprocessing(train_df)
test_df = preprocessing(test_df)
X_train = train_df['text']
Y_train = train_df['target']
X_test = test_df['text']
X_all = pd.concat([X_train, X_test])
tfidf_vectorizer = TfidfVectorizer(max_features=15000)
tfidf_vectorizer.fit(X_all)
X_train = tfidf_vectorizer.transform(X_train)
X_test = tfidf_vectorizer.transform(X_test)
X_train = X_train.toarray()
X_test = X_test.toarray()
# The XGBoost model is trained in a cell this dump omits; a minimal stand-in (assumed default XGBClassifier) so `xgb` is defined
from xgboost import XGBClassifier
xgb = XGBClassifier()
xgb.fit(X_train, Y_train)
test_pred = xgb.predict(X_test)
submission = pd.DataFrame({'id': test_df['id'], 'target': test_pred})
submission.to_csv('submission.csv', index=False) | code |
122260975/cell_8 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
stop = stopwords.words('english')
stemmer = PorterStemmer()
def clean_text(text):
processed_text = re.sub('(@\\[A-Za-z0-9]+)|([^A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?', '', text)
processed_text = processed_text.lower()
processed_text = [word for word in processed_text.split() if word not in stop]
processed_text = ' '.join([stemmer.stem(word) for word in processed_text])
return processed_text
def preprocessing(df):
df.keyword.fillna('', inplace=True)
df.location.fillna('', inplace=True)
df.text = df.keyword + df.location + df.text
df.text = df.text.apply(lambda text: clean_text(text))
df.drop(columns=['keyword', 'location'], inplace=True)
return df
train_df = preprocessing(train_df)
test_df = preprocessing(test_df)
train_df.head() | code |
122260975/cell_15 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
stop = stopwords.words('english')
stemmer = PorterStemmer()
def clean_text(text):
processed_text = re.sub('(@\\[A-Za-z0-9]+)|([^A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?', '', text)
processed_text = processed_text.lower()
processed_text = [word for word in processed_text.split() if word not in stop]
processed_text = ' '.join([stemmer.stem(word) for word in processed_text])
return processed_text
def preprocessing(df):
df.keyword.fillna('', inplace=True)
df.location.fillna('', inplace=True)
df.text = df.keyword + df.location + df.text
df.text = df.text.apply(lambda text: clean_text(text))
df.drop(columns=['keyword', 'location'], inplace=True)
return df
train_df = preprocessing(train_df)
test_df = preprocessing(test_df)
X_train = train_df['text']
Y_train = train_df['target']
X_test = test_df['text']
X_all = pd.concat([X_train, X_test])
tfidf_vectorizer = TfidfVectorizer(max_features=15000)
tfidf_vectorizer.fit(X_all)
X_train = tfidf_vectorizer.transform(X_train)
X_test = tfidf_vectorizer.transform(X_test)
X_train = X_train.toarray()
X_test = X_test.toarray()
random_forest = RandomForestClassifier()
random_forest.fit(X_train, Y_train) | code |
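The cell above fits the forest but this dump shows no evaluation for it. A minimal sketch mirroring the in-sample F1 check that cell_17 applies to the logistic model (variable names taken from these cells; the check itself is an assumption, not part of the original notebook):

```python
from sklearn.metrics import f1_score, confusion_matrix

# Hypothetical in-sample check, analogous to cell_17
rf_pred = random_forest.predict(X_train)
print(f1_score(rf_pred, Y_train))
print(confusion_matrix(rf_pred, Y_train))
```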
122260975/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
print(train_df.shape)
print(test_df.shape) | code |
122260975/cell_17 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, confusion_matrix
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
stop = stopwords.words('english')
stemmer = PorterStemmer()
def clean_text(text):
processed_text = re.sub('(@\\[A-Za-z0-9]+)|([^A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?', '', text)
processed_text = processed_text.lower()
processed_text = [word for word in processed_text.split() if word not in stop]
processed_text = ' '.join([stemmer.stem(word) for word in processed_text])
return processed_text
def preprocessing(df):
df.keyword.fillna('', inplace=True)
df.location.fillna('', inplace=True)
df.text = df.keyword + df.location + df.text
df.text = df.text.apply(lambda text: clean_text(text))
df.drop(columns=['keyword', 'location'], inplace=True)
return df
train_df = preprocessing(train_df)
test_df = preprocessing(test_df)
X_train = train_df['text']
Y_train = train_df['target']
X_test = test_df['text']
X_all = pd.concat([X_train, X_test])
tfidf_vectorizer = TfidfVectorizer(max_features=15000)
tfidf_vectorizer.fit(X_all)
X_train = tfidf_vectorizer.transform(X_train)
X_test = tfidf_vectorizer.transform(X_test)
X_train = X_train.toarray()
X_test = X_test.toarray()
logreg = LogisticRegression(random_state=0).fit(X_train, Y_train)
from sklearn.metrics import f1_score, confusion_matrix
y_pred = logreg.predict(X_train)
print(f1_score(y_pred, Y_train))
print(confusion_matrix(y_pred, Y_train)) | code |
122260975/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.head() | code |
17108514/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
reviews = pd.read_csv('../input/ml-1m/ml-1m/ratings.dat', names=['userID', 'movieID', 'rating', 'time'], delimiter='::', engine='python')
rts_gp = reviews.groupby(by=['rating']).agg({'userID': 'count'}).reset_index()
rts_gp.columns = ['Rating', 'Count']
plt.barh(rts_gp.Rating, rts_gp.Count, color='royalblue')
plt.title('Overall Count of Ratings', fontsize=15)
plt.xlabel('Count', fontsize=15)
plt.ylabel('Rating', fontsize=15)
plt.grid(ls='dotted')
plt.show() | code |
17108514/cell_23 | [
"text_plain_output_1.png"
] | from surprise import KNNBasic, KNNWithMeans, KNNWithZScore
algoritmo = KNNBasic(k=50, sim_options={'name': 'pearson', 'user_based': True}, verbose=True)  # verbose is a KNNBasic argument, not a sim_options key
algoritmo.fit(trainset)
uid = str(49)
iid = str(2058)
print('Prediction for rating: ')
pred = algoritmo.predict(uid, iid, r_ui=4, verbose=True) | code |
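`trainset` and `testset` are built in a notebook cell this dump does not include. A minimal sketch of the assumed setup with scikit-surprise, reusing the `reviews` frame from cell_5 (the 75/25 split is an assumption):

```python
from surprise import Dataset, Reader
from surprise.model_selection import train_test_split

# Assumed: ratings on the MovieLens 1-5 scale, loaded in cell_5
reader = Reader(rating_scale=(1, 5))
data = Dataset.load_from_df(reviews[['userID', 'movieID', 'rating']], reader)
trainset, testset = train_test_split(data, test_size=0.25)
```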
17108514/cell_29 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from surprise import KNNBasic, KNNWithMeans, KNNWithZScore
from surprise import accuracy
algoritmo = KNNBasic(k=50, sim_options={'name': 'pearson', 'user_based': True}, verbose=True)
algoritmo.fit(trainset)
uid = str(49)
iid = str(2058)
pred = algoritmo.predict(uid, iid, r_ui=4, verbose=True)
test_pred = algoritmo.test(testset)
accuracy.rmse(test_pred, verbose=True)
print('Analysis MAE: ')
accuracy.mae(test_pred, verbose=True) | code |
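For reference, `accuracy.rmse` and `accuracy.mae` compute the usual error measures over the set of predictions $\hat{R}$:

$$\mathrm{RMSE} = \sqrt{\frac{1}{|\hat{R}|}\sum_{\hat{r}_{ui} \in \hat{R}} (r_{ui} - \hat{r}_{ui})^2}, \qquad \mathrm{MAE} = \frac{1}{|\hat{R}|}\sum_{\hat{r}_{ui} \in \hat{R}} |r_{ui} - \hat{r}_{ui}|$$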
17108514/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
reviews = pd.read_csv('../input/ml-1m/ml-1m/ratings.dat', names=['userID', 'movieID', 'rating', 'time'], delimiter='::', engine='python')
print('No. of Unique Users :', reviews.userID.nunique())
print('No. of Unique Movies :', reviews.movieID.nunique())
print('No. of Unique Ratings :', reviews.rating.nunique()) | code |
17108514/cell_17 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from surprise import KNNBasic, KNNWithMeans, KNNWithZScore
algoritmo = KNNBasic(k=50, sim_options={'name': 'pearson', 'user_based': True}, verbose=True)
algoritmo.fit(trainset) | code |
17108514/cell_31 | [
"text_plain_output_1.png"
] | from surprise import KNNBasic, KNNWithMeans, KNNWithZScore
from surprise import accuracy
algoritmo = KNNBasic(k=50, sim_options={'name': 'pearson', 'user_based': True}, verbose=True)
algoritmo.fit(trainset)
uid = str(49)
iid = str(2058)
pred = algoritmo.predict(uid, iid, r_ui=4, verbose=True)
test_pred = algoritmo.test(testset)
accuracy.rmse(test_pred, verbose=True)
accuracy.mae(test_pred, verbose=True)
algoritmo = KNNWithMeans(k=50, sim_options={'name': 'cosine', 'user_based': False}, verbose=True)
algoritmo.fit(trainset)
uid = str(49)
iid = str(2058)
pred = algoritmo.predict(uid, iid, r_ui=4, verbose=True)
test_pred = algoritmo.test(testset)
print('Deviation RMSE: ')
accuracy.rmse(test_pred, verbose=True)
print('Analysis MAE: ')
accuracy.mae(test_pred, verbose=True) | code |
17108514/cell_27 | [
"image_output_1.png"
] | from surprise import KNNBasic, KNNWithMeans, KNNWithZScore
from surprise import accuracy
algoritmo = KNNBasic(k=50, sim_options={'name': 'pearson', 'user_based': True}, verbose=True)
algoritmo.fit(trainset)
uid = str(49)
iid = str(2058)
pred = algoritmo.predict(uid, iid, r_ui=4, verbose=True)
test_pred = algoritmo.test(testset)
print('Deviation RMSE: ')
accuracy.rmse(test_pred, verbose=True) | code |
17108514/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
reviews = pd.read_csv('../input/ml-1m/ml-1m/ratings.dat', names=['userID', 'movieID', 'rating', 'time'], delimiter='::', engine='python')
print('Rows:', reviews.shape[0], '; Columns:', reviews.shape[1], '\n')
reviews.head() | code |
104124423/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
print('Number of null values in training set:', train_data.isnull().sum().sum())
print('')
print('Number of null values in test set:', test_data.isnull().sum().sum()) | code |
104124423/cell_33 | [
"text_plain_output_1.png"
] | from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
BATCH_SIZE = 16
LEARNING_RATE = 0.001
N_EPOCHS = 20
LAYER1_SIZE = 256
LAYER2_SIZE = 256
DROPOUT_RATE = 0.3
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
# Figure size
plt.figure(figsize=(8,8))
# Subplot
for i in range(9):
img = np.asarray(train_data.iloc[i+180,1:].values.reshape((28,28))/255)
ax=plt.subplot(3, 3, i+1)
ax.grid(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.title.set_text(f'{train_data.iloc[i+180,0]}')  # show the label of the image actually plotted (row i+180, not i+18)
plt.imshow(img, cmap='gray')
plt.show()
class MNIST(Dataset):
def __init__(self, subset='train'):
super().__init__()
self.subset = subset
if self.subset == 'train':
self.X = torch.from_numpy(X_train.values.astype(np.float32))
self.y = torch.from_numpy(y_train.values)
elif self.subset == 'valid':
self.X = torch.from_numpy(X_valid.values.astype(np.float32))
self.y = torch.from_numpy(y_valid.values)
elif self.subset == 'test':
self.X = torch.from_numpy(test_data.values.astype(np.float32))
else:
raise Exception('subset must be train, valid or test')
def __getitem__(self, index):
if self.subset == 'test':
return self.X[index]
else:
return (self.X[index], self.y[index])
def __len__(self):
return self.X.shape[0]
train_dataset = MNIST(subset='train')
valid_dataset = MNIST(subset='valid')
test_dataset = MNIST(subset='test')
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)
class NeuralNet(nn.Module):
def __init__(self, layer1_size=LAYER1_SIZE, layer2_size=LAYER2_SIZE, dropout_rate=DROPOUT_RATE):
super().__init__()
self.lin1 = nn.Linear(in_features=784, out_features=layer1_size)
self.lin2 = nn.Linear(in_features=layer1_size, out_features=layer2_size)
self.lin3 = nn.Linear(in_features=layer2_size, out_features=10)
self.relu = nn.ReLU()
self.drop = nn.Dropout(p=dropout_rate)
def forward(self, x):
out = self.lin1(x)
out = self.relu(out)
out = self.drop(out)
out = self.lin2(out)
out = self.relu(out)
out = self.drop(out)
out = self.lin3(out)
return out
model = NeuralNet().to(device)
loss = nn.CrossEntropyLoss()
optimiser = optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
loss_hist = []
val_loss_hist = []
for epoch in range(N_EPOCHS):
loss_acc = 0
val_loss_acc = 0
train_count = 0
valid_count = 0
for imgs, labels in train_loader:
imgs = imgs.to(device)
labels = labels.to(device)
preds = model(imgs)
L = loss(preds, labels)
L.backward()
optimiser.step()
optimiser.zero_grad()
loss_acc += L.detach().item()
train_count += 1
with torch.no_grad():
for val_imgs, val_labels in valid_loader:
val_imgs = val_imgs.to(device)
val_labels = val_labels.to(device)
val_preds = model(val_imgs)
val_L = loss(val_preds, val_labels)
val_loss_acc += val_L.item()
valid_count += 1
loss_hist.append(loss_acc / train_count)
val_loss_hist.append(val_loss_acc / valid_count)
plt.figure(figsize=(10, 5))
plt.plot(loss_hist, c='C0', label='loss')
plt.plot(val_loss_hist, c='C1', label='val_loss')
plt.title('Cross entropy loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show() | code |
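The `MNIST` dataset class above reads `X_train`, `y_train`, `X_valid` and `y_valid`, which are created in a cell this dump omits. A minimal sketch of the assumed split (the 80/20 ratio and the /255 scaling, matching `test_data`, are assumptions):

```python
from sklearn.model_selection import train_test_split

# Assumed: 'label' holds the digit, the remaining columns are pixels scaled like test_data
X = train_data.drop(columns=['label']) / 255
y = train_data['label']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=42)
```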
104124423/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
print(train_data.shape)
train_data.head(3) | code |
104124423/cell_7 | [
"image_output_1.png"
] | import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device | code |
104124423/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
plt.figure(figsize=(8, 8))
for i in range(9):
img = np.asarray(train_data.iloc[i + 180, 1:].values.reshape((28, 28)) / 255)
ax = plt.subplot(3, 3, i + 1)
ax.grid(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.title.set_text(f'{train_data.iloc[i + 180, 0]}')
plt.imshow(img, cmap='gray')
plt.show() | code |
104124423/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
# Figure size
plt.figure(figsize=(8,8))
# Subplot
for i in range(9):
img = np.asarray(train_data.iloc[i+180,1:].values.reshape((28,28))/255)
ax=plt.subplot(3, 3, i+1)
ax.grid(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.title.set_text(f'{train_data.iloc[i+180,0]}')
plt.imshow(img, cmap='gray')
plt.show()
plt.figure(figsize=(8, 4))
sns.countplot(x='label', data=train_data)
plt.title('Distribution of labels in training set') | code |
104124423/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
BATCH_SIZE = 16
LEARNING_RATE = 0.001
N_EPOCHS = 20
LAYER1_SIZE = 256
LAYER2_SIZE = 256
DROPOUT_RATE = 0.3
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
# Figure size
plt.figure(figsize=(8,8))
# Subplot
for i in range(9):
img = np.asarray(train_data.iloc[i+180,1:].values.reshape((28,28))/255)
ax=plt.subplot(3, 3, i+1)
ax.grid(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.title.set_text(f'{train_data.iloc[i+180,0]}')
plt.imshow(img, cmap='gray')
plt.show()
class MNIST(Dataset):
def __init__(self, subset='train'):
super().__init__()
self.subset = subset
if self.subset == 'train':
self.X = torch.from_numpy(X_train.values.astype(np.float32))
self.y = torch.from_numpy(y_train.values)
elif self.subset == 'valid':
self.X = torch.from_numpy(X_valid.values.astype(np.float32))
self.y = torch.from_numpy(y_valid.values)
elif self.subset == 'test':
self.X = torch.from_numpy(test_data.values.astype(np.float32))
else:
raise Exception('subset must be train, valid or test')
def __getitem__(self, index):
if self.subset == 'test':
return self.X[index]
else:
return (self.X[index], self.y[index])
def __len__(self):
return self.X.shape[0]
train_dataset = MNIST(subset='train')
valid_dataset = MNIST(subset='valid')
test_dataset = MNIST(subset='test')
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)
class NeuralNet(nn.Module):
def __init__(self, layer1_size=LAYER1_SIZE, layer2_size=LAYER2_SIZE, dropout_rate=DROPOUT_RATE):
super().__init__()
self.lin1 = nn.Linear(in_features=784, out_features=layer1_size)
self.lin2 = nn.Linear(in_features=layer1_size, out_features=layer2_size)
self.lin3 = nn.Linear(in_features=layer2_size, out_features=10)
self.relu = nn.ReLU()
self.drop = nn.Dropout(p=dropout_rate)
def forward(self, x):
out = self.lin1(x)
out = self.relu(out)
out = self.drop(out)
out = self.lin2(out)
out = self.relu(out)
out = self.drop(out)
out = self.lin3(out)
return out
model = NeuralNet().to(device)
loss = nn.CrossEntropyLoss()
optimiser = optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
loss_hist = []
val_loss_hist = []
for epoch in range(N_EPOCHS):
loss_acc = 0
val_loss_acc = 0
train_count = 0
valid_count = 0
for imgs, labels in train_loader:
imgs = imgs.to(device)
labels = labels.to(device)
preds = model(imgs)
L = loss(preds, labels)
L.backward()
optimiser.step()
optimiser.zero_grad()
loss_acc += L.detach().item()
train_count += 1
with torch.no_grad():
for val_imgs, val_labels in valid_loader:
val_imgs = val_imgs.to(device)
val_labels = val_labels.to(device)
val_preds = model(val_imgs)
val_L = loss(val_preds, val_labels)
val_loss_acc += val_L.item()
valid_count += 1
loss_hist.append(loss_acc / train_count)
val_loss_hist.append(val_loss_acc / valid_count)
if (epoch + 1) % 5 == 0:
print(f'Epoch {epoch + 1}/{N_EPOCHS}, loss {loss_acc / train_count:.5f}, val_loss {val_loss_acc / valid_count:.5f}') | code |
104124423/cell_36 | [
"image_output_1.png"
] | from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
BATCH_SIZE = 16
LEARNING_RATE = 0.001
N_EPOCHS = 20
LAYER1_SIZE = 256
LAYER2_SIZE = 256
DROPOUT_RATE = 0.3
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv') / 255
# Figure size
plt.figure(figsize=(8,8))
# Subplot
for i in range(9):
img = np.asarray(train_data.iloc[i+180,1:].values.reshape((28,28))/255)
ax=plt.subplot(3, 3, i+1)
ax.grid(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.title.set_text(f'{train_data.iloc[i+180,0]}')
plt.imshow(img, cmap='gray')
plt.show()
class MNIST(Dataset):
def __init__(self, subset='train'):
super().__init__()
self.subset = subset
if self.subset == 'train':
self.X = torch.from_numpy(X_train.values.astype(np.float32))
self.y = torch.from_numpy(y_train.values)
elif self.subset == 'valid':
self.X = torch.from_numpy(X_valid.values.astype(np.float32))
self.y = torch.from_numpy(y_valid.values)
elif self.subset == 'test':
self.X = torch.from_numpy(test_data.values.astype(np.float32))
else:
raise Exception('subset must be train, valid or test')
def __getitem__(self, index):
if self.subset == 'test':
return self.X[index]
else:
return (self.X[index], self.y[index])
def __len__(self):
return self.X.shape[0]
train_dataset = MNIST(subset='train')
valid_dataset = MNIST(subset='valid')
test_dataset = MNIST(subset='test')
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)
class NeuralNet(nn.Module):
def __init__(self, layer1_size=LAYER1_SIZE, layer2_size=LAYER2_SIZE, dropout_rate=DROPOUT_RATE):
super().__init__()
self.lin1 = nn.Linear(in_features=784, out_features=layer1_size)
self.lin2 = nn.Linear(in_features=layer1_size, out_features=layer2_size)
self.lin3 = nn.Linear(in_features=layer2_size, out_features=10)
self.relu = nn.ReLU()
self.drop = nn.Dropout(p=dropout_rate)
def forward(self, x):
out = self.lin1(x)
out = self.relu(out)
out = self.drop(out)
out = self.lin2(out)
out = self.relu(out)
out = self.drop(out)
out = self.lin3(out)
return out
model = NeuralNet().to(device)
loss = nn.CrossEntropyLoss()
optimiser = optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
loss_hist = []
val_loss_hist = []
for epoch in range(N_EPOCHS):
loss_acc = 0
val_loss_acc = 0
train_count = 0
valid_count = 0
for imgs, labels in train_loader:
imgs = imgs.to(device)
labels = labels.to(device)
preds = model(imgs)
L = loss(preds, labels)
L.backward()
optimiser.step()
optimiser.zero_grad()
loss_acc += L.detach().item()
train_count += 1
with torch.no_grad():
for val_imgs, val_labels in valid_loader:
val_imgs = val_imgs.to(device)
val_labels = val_labels.to(device)
val_preds = model(val_imgs)
val_L = loss(val_preds, val_labels)
val_loss_acc += val_L.item()
valid_count += 1
loss_hist.append(loss_acc / train_count)
val_loss_hist.append(val_loss_acc / valid_count)
model.eval()
with torch.no_grad():
n_correct = 0
n_samples = 0
n_class_correct = [0 for i in range(10)]
n_class_sample = [0 for i in range(10)]
for imgs, labels in valid_loader:
imgs = imgs.to(device)
labels = labels.to(device)
output = model(imgs)
_, preds = torch.max(output, 1)
n_samples += labels.shape[0]
n_correct += (preds == labels).sum().item()
for i in range(BATCH_SIZE):
try:
label = labels[i].item()
pred = preds[i].item()
except:
break
if label == pred:
n_class_correct[label] += 1
n_class_sample[label] += 1
acc = 100 * n_correct / n_samples
print(f'Overall accuracy on validation set: {acc:.1f} %')
for i in range(10):
print(f'Accuracy of {i}: {100 * n_class_correct[i] / n_class_sample[i]:.1f} %') | code |
105195130/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/iris-dataset/iris.data.csv')
data.describe().T
X = data.iloc[:, [0, 1, 2, 3]].values
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=200, n_init=10, random_state=0)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('THE ELBOW METHOD')
plt.xlabel('Number of Clusters')
plt.ylabel('wcss')
plt.show() | code |
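The `wcss` values plotted above are the KMeans inertia, i.e. the within-cluster sum of squares

$$\mathrm{WCSS} = \sum_{k=1}^{K} \sum_{x_i \in C_k} \lVert x_i - \mu_k \rVert^2,$$

which always decreases as $K$ grows; the "elbow" is the point where adding clusters stops paying off.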
105195130/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/iris-dataset/iris.data.csv')
data.describe() | code |
105195130/cell_6 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/iris-dataset/iris.data.csv')
data.describe().T
data['Iris-setosa'].unique()  # the CSV has no header row, so the first record became the column names | code |
105195130/cell_11 | [
"text_html_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/iris-dataset/iris.data.csv')
data.describe().T
X = data.iloc[:, [0, 1, 2, 3]].values
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=200, n_init=10, random_state=0)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
kmeans = KMeans(n_clusters=3, init='k-means++', max_iter=200, n_init=10, random_state=0)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s=100, c='red', label='Iris-setosa')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s=100, c='green', label='Iris-versicolor')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s=100, c='blue', label='Iris-virginica')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=100, c='yellow', label='centroids')
plt.title('K-means Iris Dataset')
plt.legend() | code |
105195130/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/iris-dataset/iris.data.csv')
data.head() | code |
105195130/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/iris-dataset/iris.data.csv')
data.describe().T | code |
328019/cell_4 | [
"text_plain_output_1.png"
] | import hashlib
import os
import pandas as pd
import numpy as np
import pandas as pd
import os
import hashlib
records = []
for name in os.listdir('../input/train/'):
if 'mask' in name or not name.endswith('.tif'):
continue
patient_id, image_id = name[:-len('.tif')].split('_')  # str.strip removes characters, not a suffix
with open('../input/train/' + name, 'rb') as fd:
md5sum = hashlib.md5(fd.read()).hexdigest()
records.append({'filename': name, 'patient_id': patient_id, 'image_id': image_id, 'md5sum': md5sum})
df = pd.DataFrame.from_records(records)
counts = df.groupby('md5sum')['filename'].count()
duplicates = counts[counts > 1]
duplicates[duplicates > 2] | code |
328019/cell_3 | [
"text_plain_output_1.png"
] | import hashlib
import os
import pandas as pd
import numpy as np
import pandas as pd
import os
import hashlib
records = []
for name in os.listdir('../input/train/'):
if 'mask' in name or not name.endswith('.tif'):
continue
patient_id, image_id = name[:-len('.tif')].split('_')
with open('../input/train/' + name, 'rb') as fd:
md5sum = hashlib.md5(fd.read()).hexdigest()
records.append({'filename': name, 'patient_id': patient_id, 'image_id': image_id, 'md5sum': md5sum})
df = pd.DataFrame.from_records(records)
counts = df.groupby('md5sum')['filename'].count()
duplicates = counts[counts > 1]
print(len(duplicates)) | code |
328019/cell_5 | [
"text_plain_output_1.png"
] | import hashlib
import os
import pandas as pd
import numpy as np
import pandas as pd
import os
import hashlib
records = []
for name in os.listdir('../input/train/'):
if 'mask' in name or not name.endswith('.tif'):
continue
patient_id, image_id = name[:-len('.tif')].split('_')
with open('../input/train/' + name, 'rb') as fd:
md5sum = hashlib.md5(fd.read()).hexdigest()
records.append({'filename': name, 'patient_id': patient_id, 'image_id': image_id, 'md5sum': md5sum})
df = pd.DataFrame.from_records(records)
counts = df.groupby('md5sum')['filename'].count()
duplicates = counts[counts > 1]
for md5sum in duplicates.index:
subset = df[df['md5sum'] == md5sum]
if len(subset['patient_id'].value_counts()) > 1:
print(subset)
print('------') | code |
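The three cells above read each TIFF fully into memory before hashing. That is fine for these small images; for larger files the usual pattern is a chunked digest (a sketch, not from the original notebook):

```python
import hashlib

def md5_of_file(path, chunk_size=1 << 20):
    """Stream a file through MD5 one chunk at a time."""
    digest = hashlib.md5()
    with open(path, 'rb') as fd:
        for chunk in iter(lambda: fd.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
```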
17096995/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import datetime
import glob
import math
import numpy as np
import pandas as pd
station = 68241
def Datastations(station, path):
allFiles = glob.glob(path + '/*.csv')
list_ = []
array = ['T', 'MinT', 'MaxT', 'Precip', 'AIR_TEMP', 'AIR_TEMP_MIN', 'AIR_TEMP_MAX', 'PRCP']
for file_ in allFiles:
df = pd.read_csv(file_, index_col=None, header=0)
df = df[df.station_number == station]
df = df.loc[df['parameter'].isin(array)]
list_.append(df)
frame = pd.concat(list_, ignore_index=True)
return frame
df1 = Datastations(station, 'refdata/obs')
df2 = Datastations(station, 'refdata/BoM_ETA_20160501-20170430/obs')
df1['valid_start'] = df1['valid_start'].apply(pd.to_numeric)
df1['valid_end'] = df1['valid_end'].apply(pd.to_numeric)
df1['valid_start'] = df1['valid_start'] + 36000
df1['valid_end'] = df1['valid_end'] + 36000
df1['valid_start'] = pd.to_datetime(df1['valid_start'], unit='s')
df1['valid_end'] = pd.to_datetime(df1['valid_end'], unit='s')
df2['valid_start'] = df2['valid_start'].apply(pd.to_numeric)
df2['valid_end'] = df2['valid_end'].apply(pd.to_numeric)
df2['valid_start'] = df2['valid_start'] + 36000
df2['valid_end'] = df2['valid_end'] + 36000
df2.loc[df2.parameter == 'AIR_TEMP', 'valid_end'] = df2['valid_end'] + 3600
df2.loc[df2.parameter == 'PRCP', 'valid_end'] = df2['valid_end'] + 3000
df2['valid_start'] = pd.to_datetime(df2['valid_start'], unit='s')
df2['valid_end'] = pd.to_datetime(df2['valid_end'], unit='s')
df1['T_Celsius'] = np.where(df1['parameter'] == 'T', df1['value'], '')
df1['MinT_Celsius'] = np.where(df1['parameter'] == 'MinT', df1['value'], '')
df1['MaxT_Celsius'] = np.where(df1['parameter'] == 'MaxT', df1['value'], '')
df1['Precip_mm'] = np.where(df1['parameter'] == 'Precip', df1['value'], '')
df1 = df1.drop(['area_code', 'unit', 'statistic', 'level', 'qc_valid_minutes', 'parameter', 'value', 'qc_valid_start', 'qc_valid_end'], axis=1)
# subset the aggregation columns with a list; tuple subsetting was removed in pandas 2.0
df1 = df1.groupby(['valid_start', 'valid_end', 'station_number'])[['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']].sum().reset_index()
df2['T_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP', df2['value'], '')
df2['MinT_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP_MIN', df2['value'], '')
df2['MaxT_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP_MAX', df2['value'], '')
df2['Precip_mm'] = np.where(df2['parameter'] == 'PRCP', df2['value'], '')
df2 = df2.drop(['area_code', 'unit', 'statistic', 'level', 'qc_valid_minutes', 'parameter', 'value', 'instantaneous', 'qc_valid_minutes_start', 'qc_valid_minutes_end'], axis=1)
df2 = df2.groupby(['valid_start', 'valid_end', 'station_number'])[['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']].sum().reset_index()
df3 = pd.concat([df2, df1], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
df3 = df3.resample('60Min', on='valid_start').first().drop(columns='valid_start').reset_index()
df4 = df3.drop(df3[(df3.valid_start < '2017-01-01 00:00:00') | (df3.valid_start > '2017-12-31 23:00:00')].index)
df4['valid_end'] = df4['valid_start'] + datetime.timedelta(0, 3600)
df4['station_number'] = station
df4 = df4.replace('', np.nan, regex=True)
df4['T_Celsius'] = df4['T_Celsius'].apply(pd.to_numeric)
df4['MinT_Celsius'] = df4['MinT_Celsius'].apply(pd.to_numeric)
df4['MaxT_Celsius'] = df4['MaxT_Celsius'].apply(pd.to_numeric)
df4['Precip_mm'] = df4['Precip_mm'].apply(pd.to_numeric)
def Datagap(parameter):
k = 0
for i in range(len(df4)):
if math.isnan(df4[parameter].values[i]):
k = k + 1
if k >= 120:
k = 0
else:
k = 0
Datagap('T_Celsius')
Datagap('MinT_Celsius')
Datagap('MaxT_Celsius')
Datagap('Precip_mm')
def Datafilling(parameter):
for i in range(len(df4)):
j = 0
if math.isnan(df4[parameter].values[i]):
while j < 6:
j = j + 1
if math.isnan(df4[parameter].values[i - j * 24]) == False:
df4[parameter].values[i] = df4[parameter].values[i - j * 24]
j = 6
elif math.isnan(df4[parameter].values[i + j * 24]) == False:
df4[parameter].values[i] = df4[parameter].values[i + j * 24]
j = 6
Datafilling('T_Celsius')
Datafilling('MinT_Celsius')
Datafilling('MaxT_Celsius')
Datafilling('Precip_mm')
print(df4.isna().sum())
if df4.isnull().values.any():
print('Warning: datagap for a given hour > 5 days: check your data')
df4 = df4.round({'T_Celsius': 1, 'MinT_Celsius': 1, 'MaxT_Celsius': 1, 'Precip_mm': 1})
df4.to_csv('csv files/dapto_test.csv', index=False) | code |
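`Datafilling` above walks up to six days backwards, then forwards, in 24-hour steps to borrow the value from the same clock hour on a nearby day. With the hourly, regularly spaced rows that `resample('60Min')` guarantees, a shift-based variant expresses the same priority order more compactly (a sketch under that assumption; unlike the loop, it never reads past the ends of the frame):

```python
# Hypothetical vectorized gap fill: same hour, nearest day first, past before future
for parameter in ['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']:
    for day in range(1, 7):
        df4[parameter] = df4[parameter].fillna(df4[parameter].shift(24 * day))
        df4[parameter] = df4[parameter].fillna(df4[parameter].shift(-24 * day))
```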
17096995/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import glob
import numpy as np
import pandas as pd
station = 68241
def Datastations(station, path):
allFiles = glob.glob(path + '/*.csv')
list_ = []
array = ['T', 'MinT', 'MaxT', 'Precip', 'AIR_TEMP', 'AIR_TEMP_MIN', 'AIR_TEMP_MAX', 'PRCP']
for file_ in allFiles:
df = pd.read_csv(file_, index_col=None, header=0)
df = df[df.station_number == station]
df = df.loc[df['parameter'].isin(array)]
list_.append(df)
frame = pd.concat(list_, ignore_index=True)
return frame
df1 = Datastations(station, 'refdata/obs')
df2 = Datastations(station, 'refdata/BoM_ETA_20160501-20170430/obs')
df1['valid_start'] = df1['valid_start'].apply(pd.to_numeric)
df1['valid_end'] = df1['valid_end'].apply(pd.to_numeric)
df1['valid_start'] = df1['valid_start'] + 36000
df1['valid_end'] = df1['valid_end'] + 36000
df1['valid_start'] = pd.to_datetime(df1['valid_start'], unit='s')
df1['valid_end'] = pd.to_datetime(df1['valid_end'], unit='s')
df2['valid_start'] = df2['valid_start'].apply(pd.to_numeric)
df2['valid_end'] = df2['valid_end'].apply(pd.to_numeric)
df2['valid_start'] = df2['valid_start'] + 36000
df2['valid_end'] = df2['valid_end'] + 36000
df2.loc[df2.parameter == 'AIR_TEMP', 'valid_end'] = df2['valid_end'] + 3600
df2.loc[df2.parameter == 'PRCP', 'valid_end'] = df2['valid_end'] + 3000
df2['valid_start'] = pd.to_datetime(df2['valid_start'], unit='s')
df2['valid_end'] = pd.to_datetime(df2['valid_end'], unit='s')
df1['T_Celsius'] = np.where(df1['parameter'] == 'T', df1['value'], '')
df1['MinT_Celsius'] = np.where(df1['parameter'] == 'MinT', df1['value'], '')
df1['MaxT_Celsius'] = np.where(df1['parameter'] == 'MaxT', df1['value'], '')
df1['Precip_mm'] = np.where(df1['parameter'] == 'Precip', df1['value'], '')
df1 = df1.drop(['area_code', 'unit', 'statistic', 'level', 'qc_valid_minutes', 'parameter', 'value', 'qc_valid_start', 'qc_valid_end'], axis=1)
df1 = df1.groupby(['valid_start', 'valid_end', 'station_number'])[['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']].sum().reset_index()
print(len(df1))
print(df1.head())
df2['T_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP', df2['value'], '')
df2['MinT_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP_MIN', df2['value'], '')
df2['MaxT_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP_MAX', df2['value'], '')
df2['Precip_mm'] = np.where(df2['parameter'] == 'PRCP', df2['value'], '')
df2 = df2.drop(['area_code', 'unit', 'statistic', 'level', 'qc_valid_minutes', 'parameter', 'value', 'instantaneous', 'qc_valid_minutes_start', 'qc_valid_minutes_end'], axis=1)
df2 = df2.groupby(['valid_start', 'valid_end', 'station_number'])[['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']].sum().reset_index()
print(len(df2))
print(df2.head()) | code |
17096995/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import datetime
import glob
import math
import numpy as np
import pandas as pd
station = 68241
def Datastations(station, path):
allFiles = glob.glob(path + '/*.csv')
list_ = []
array = ['T', 'MinT', 'MaxT', 'Precip', 'AIR_TEMP', 'AIR_TEMP_MIN', 'AIR_TEMP_MAX', 'PRCP']
for file_ in allFiles:
df = pd.read_csv(file_, index_col=None, header=0)
df = df[df.station_number == station]
df = df.loc[df['parameter'].isin(array)]
list_.append(df)
frame = pd.concat(list_, ignore_index=True)
return frame
df1 = Datastations(station, 'refdata/obs')
df2 = Datastations(station, 'refdata/BoM_ETA_20160501-20170430/obs')
df1['valid_start'] = df1['valid_start'].apply(pd.to_numeric)
df1['valid_end'] = df1['valid_end'].apply(pd.to_numeric)
df1['valid_start'] = df1['valid_start'] + 36000
df1['valid_end'] = df1['valid_end'] + 36000
df1['valid_start'] = pd.to_datetime(df1['valid_start'], unit='s')
df1['valid_end'] = pd.to_datetime(df1['valid_end'], unit='s')
df2['valid_start'] = df2['valid_start'].apply(pd.to_numeric)
df2['valid_end'] = df2['valid_end'].apply(pd.to_numeric)
df2['valid_start'] = df2['valid_start'] + 36000
df2['valid_end'] = df2['valid_end'] + 36000
df2.loc[df2.parameter == 'AIR_TEMP', 'valid_end'] = df2['valid_end'] + 3600
df2.loc[df2.parameter == 'PRCP', 'valid_end'] = df2['valid_end'] + 3000
df2['valid_start'] = pd.to_datetime(df2['valid_start'], unit='s')
df2['valid_end'] = pd.to_datetime(df2['valid_end'], unit='s')
df1['T_Celsius'] = np.where(df1['parameter'] == 'T', df1['value'], '')
df1['MinT_Celsius'] = np.where(df1['parameter'] == 'MinT', df1['value'], '')
df1['MaxT_Celsius'] = np.where(df1['parameter'] == 'MaxT', df1['value'], '')
df1['Precip_mm'] = np.where(df1['parameter'] == 'Precip', df1['value'], '')
df1 = df1.drop(['area_code', 'unit', 'statistic', 'level', 'qc_valid_minutes', 'parameter', 'value', 'qc_valid_start', 'qc_valid_end'], axis=1)
df1 = df1.groupby(['valid_start', 'valid_end', 'station_number'])[['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']].sum().reset_index()
df2['T_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP', df2['value'], '')
df2['MinT_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP_MIN', df2['value'], '')
df2['MaxT_Celsius'] = np.where(df2['parameter'] == 'AIR_TEMP_MAX', df2['value'], '')
df2['Precip_mm'] = np.where(df2['parameter'] == 'PRCP', df2['value'], '')
df2 = df2.drop(['area_code', 'unit', 'statistic', 'level', 'qc_valid_minutes', 'parameter', 'value', 'instantaneous', 'qc_valid_minutes_start', 'qc_valid_minutes_end'], axis=1)
df2 = df2.groupby(['valid_start', 'valid_end', 'station_number'])[['T_Celsius', 'MinT_Celsius', 'MaxT_Celsius', 'Precip_mm']].sum().reset_index()
df3 = pd.concat([df2, df1], ignore_index=True)
print(len(df3))
df3 = df3.resample('60Min', on='valid_start').first().drop(columns='valid_start').reset_index()
df4 = df3.drop(df3[(df3.valid_start < '2017-01-01 00:00:00') | (df3.valid_start > '2017-12-31 23:00:00')].index)
if len(df4) != 8760:
print('Too much missing data, check your data')
print(df4.head())
print(df4.tail())
df4['valid_end'] = df4['valid_start'] + datetime.timedelta(0, 3600)
df4['station_number'] = station
print(len(df4))
df4 = df4.replace('', np.nan, regex=True)
print(df4.isna().sum())
df4['T_Celsius'] = df4['T_Celsius'].apply(pd.to_numeric)
df4['MinT_Celsius'] = df4['MinT_Celsius'].apply(pd.to_numeric)
df4['MaxT_Celsius'] = df4['MaxT_Celsius'].apply(pd.to_numeric)
df4['Precip_mm'] = df4['Precip_mm'].apply(pd.to_numeric)
def Datagap(parameter):
k = 0
for i in range(len(df4)):
if math.isnan(df4[parameter].values[i]):
k = k + 1
if k >= 120:
print('Warning datagap >= 5 days: check your data', parameter)
k = 0
else:
k = 0
Datagap('T_Celsius')
Datagap('MinT_Celsius')
Datagap('MaxT_Celsius')
Datagap('Precip_mm') | code |
17096995/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import glob
import pandas as pd
station = 68241
def Datastations(station, path):
allFiles = glob.glob(path + '/*.csv')
list_ = []
array = ['T', 'MinT', 'MaxT', 'Precip', 'AIR_TEMP', 'AIR_TEMP_MIN', 'AIR_TEMP_MAX', 'PRCP']
for file_ in allFiles:
df = pd.read_csv(file_, index_col=None, header=0)
df = df[df.station_number == station]
df = df.loc[df['parameter'].isin(array)]
list_.append(df)
frame = pd.concat(list_, ignore_index=True)
return frame
df1 = Datastations(station, 'refdata/obs')
df2 = Datastations(station, 'refdata/BoM_ETA_20160501-20170430/obs')
df1['valid_start'] = df1['valid_start'].apply(pd.to_numeric)
df1['valid_end'] = df1['valid_end'].apply(pd.to_numeric)
df1['valid_start'] = df1['valid_start'] + 36000
df1['valid_end'] = df1['valid_end'] + 36000
df1['valid_start'] = pd.to_datetime(df1['valid_start'], unit='s')
df1['valid_end'] = pd.to_datetime(df1['valid_end'], unit='s')
print(df1['valid_start'].values[1], df1['valid_end'].values[1])
df2['valid_start'] = df2['valid_start'].apply(pd.to_numeric)
df2['valid_end'] = df2['valid_end'].apply(pd.to_numeric)
df2['valid_start'] = df2['valid_start'] + 36000
df2['valid_end'] = df2['valid_end'] + 36000
df2.loc[df2.parameter == 'AIR_TEMP', 'valid_end'] = df2['valid_end'] + 3600
df2.loc[df2.parameter == 'PRCP', 'valid_end'] = df2['valid_end'] + 3000
df2['valid_start'] = pd.to_datetime(df2['valid_start'], unit='s')
df2['valid_end'] = pd.to_datetime(df2['valid_end'], unit='s')
print(df2['valid_start'].values[1], df2['valid_end'].values[1]) | code |
17096995/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import glob
import pandas as pd
station = 68241
def Datastations(station, path):
allFiles = glob.glob(path + '/*.csv')
list_ = []
array = ['T', 'MinT', 'MaxT', 'Precip', 'AIR_TEMP', 'AIR_TEMP_MIN', 'AIR_TEMP_MAX', 'PRCP']
for file_ in allFiles:
df = pd.read_csv(file_, index_col=None, header=0)
df = df[df.station_number == station]
df = df.loc[df['parameter'].isin(array)]
list_.append(df)
frame = pd.concat(list_, ignore_index=True)
return frame
df1 = Datastations(station, 'refdata/obs')
df2 = Datastations(station, 'refdata/BoM_ETA_20160501-20170430/obs')
print(len(df1))
print(df1.head())
print(len(df2))
print(df2.head()) | code |
34148405/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data.loc[df_complete_data['Ticket'] == '113572', ['Embarked']] = 'S'
df_complete_data['Title'] = df_complete_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
for i in range(len(df_complete_data)):
if df_complete_data.iloc[i, df_complete_data.columns.get_loc('Sex')] == 'male' and df_complete_data.iloc[i, df_complete_data.columns.get_loc('Age')] <= 14:
df_complete_data.iloc[i, df_complete_data.columns.get_loc('Title')] = 1
if df_complete_data.iloc[i, df_complete_data.columns.get_loc('Sex')] == 'female' and df_complete_data.iloc[i, df_complete_data.columns.get_loc('Age')] <= 14:
df_complete_data.iloc[i, df_complete_data.columns.get_loc('Title')] = 2
if df_complete_data.iloc[i, df_complete_data.columns.get_loc('Sex')] == 'male' and df_complete_data.iloc[i, df_complete_data.columns.get_loc('Age')] > 14:
df_complete_data.iloc[i, df_complete_data.columns.get_loc('Title')] = 3
if df_complete_data.iloc[i, df_complete_data.columns.get_loc('Sex')] == 'female' and df_complete_data.iloc[i, df_complete_data.columns.get_loc('Age')] > 14:
df_complete_data.iloc[i, df_complete_data.columns.get_loc('Title')] = 4
df_complete_data[df_complete_data['Age'].isnull()] | code |
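The row-by-row loop above recodes `Title` into four sex/age groups; the same mapping can be done vectorised with `numpy.select` (a sketch of an equivalent, not what the notebook ran):

```python
import numpy as np

conditions = [
    (df_complete_data['Sex'] == 'male') & (df_complete_data['Age'] <= 14),
    (df_complete_data['Sex'] == 'female') & (df_complete_data['Age'] <= 14),
    (df_complete_data['Sex'] == 'male') & (df_complete_data['Age'] > 14),
    (df_complete_data['Sex'] == 'female') & (df_complete_data['Age'] > 14),
]
# Rows with missing Age match no condition and keep their original string Title,
# which is why the cell then lists df_complete_data[df_complete_data['Age'].isnull()]
groups = np.select(conditions, [1, 2, 3, 4], default=0)
df_complete_data.loc[groups != 0, 'Title'] = groups[groups != 0]
```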
34148405/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
df_complete_data['Fare'].isnull().sum() | code |
34148405/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_train_dataset.info() | code |
34148405/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.describe() | code |
34148405/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data['Fare'].isnull().sum() | code |
34148405/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data.loc[df_complete_data['Ticket'] == '113572', ['Embarked']] = 'S'
df_complete_data['Title'] = df_complete_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
df_complete_data['Title'].value_counts() | code |
34148405/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34148405/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.info() | code |
34148405/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data.loc[df_complete_data['Ticket'] == '113572', ['Embarked']] = 'S'
df_complete_data['Title'] = df_complete_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
df_complete_data['Title'].unique() | code |
34148405/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape | code |
34148405/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data.loc[df_complete_data['Ticket'] == '113572', ['Embarked']] = 'S'
df_complete_data.loc[df_complete_data['Ticket'] == '113572'].isnull().describe() | code |
34148405/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data.loc[df_complete_data['Ticket'] == '113572', ['Embarked']] = 'S'
df_complete_data['Age'].isnull().sum() | code |
34148405/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data.loc[df_complete_data['Ticket'] == '113572', ['Embarked']] = 'S'
df_complete_data['Ticket'].isnull().sum() | code |
34148405/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train_dataset = pd.read_csv('../input/titanic/train.csv')
df_test_dataset = pd.read_csv('../input/titanic/test.csv')
df_gender_submission = pd.read_csv('../input/titanic/gender_submission.csv')
df_complete_data = pd.concat([df_train_dataset, df_test_dataset])
df_complete_data.shape
Med_value = df_complete_data[df_complete_data['Pclass'] == 3]['Fare'].median()
df_complete_data.loc[df_complete_data['Ticket'] == '3701', ['Fare']] = Med_value
df_complete_data['Embarked'].isnull().sum() | code |
50227807/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames[:5]
def load_data(filenames):
i = 50
X = []
y = []
for name in filenames:
img = mpimg.imread(os.path.join('/kaggle/working/train', name))
X.append(cv2.resize(img, IMAGE_SIZE))
cat = name.split('.')[0]
if cat == 'dog':
y.append(0)
else:
y.append(1)
i -= 1
if i <= 0:
break
return (X, y)
def refine_data(X, y):
X = np.array(X)
X = X.reshape(X.shape[0], -1)
X = X.T
y = np.array(y)
y = y.reshape((1, y.shape[0]))
return (X, y)
X, y = load_data(filenames)  # read the images and labels before reshaping
X, y = refine_data(X, y)
layer_dims = [X.shape[0], 20, 7, 5, 1]
def initialize_parameters(layer_dims):
np.random.seed(3)
parameters = {}
L = len(layer_dims)
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
return parameters
parameters = initialize_parameters(layer_dims)
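# A minimal sanity-check sketch: each W_l should map layer_dims[l-1]
# activations to layer_dims[l] units, and each b_l is a column vector.
for l in range(1, len(layer_dims)):
    assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1])
    assert parameters['b' + str(l)].shape == (layer_dims[l], 1)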
parameters | code |
50227807/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
print(os.listdir('../input/dogs-vs-cats')) | code |
50227807/cell_8 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames[:5]
def load_data(filenames):
i = 50
X = []
y = []
for name in filenames:
img = mpimg.imread(os.path.join('/kaggle/working/train', name))
X.append(cv2.resize(img, IMAGE_SIZE))
cat = name.split('.')[0]
if cat == 'dog':
y.append(0)
else:
y.append(1)
i -= 1
if i <= 0:
break
return (X, y)
sample = random.choice(filenames)
print(sample)
plt.imshow(mpimg.imread('/kaggle/working/train/' + sample))
plt.show() | code |
50227807/cell_15 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames[:5]
def load_data(filenames):
i = 50
X = []
y = []
for name in filenames:
img = mpimg.imread(os.path.join('/kaggle/working/train', name))
X.append(cv2.resize(img, IMAGE_SIZE))
cat = name.split('.')[0]
if cat == 'dog':
y.append(0)
else:
y.append(1)
i -= 1
if i <= 0:
break
return (X, y)
def refine_data(X, y):
X = np.array(X)
X = X.reshape(X.shape[0], -1)
X = X.T
y = np.array(y)
y = y.reshape((1, y.shape[0]))
return (X, y)
X, y = load_data(filenames)  # read the images and labels before reshaping
X, y = refine_data(X, y)
layer_dims = [X.shape[0], 20, 7, 5, 1]
def initialize_parameters(layer_dims):
np.random.seed(3)
parameters = {}
L = len(layer_dims)
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
return parameters
parameters = initialize_parameters(layer_dims)
parameters
def linear_fwd(A, W, b):
Z = np.dot(W, A) + b
cache = (A, W, b)
return (Z, cache)
Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
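# A quick shape check: Z = W1 @ X + b1 broadcasts b1 across columns, so Z
# should be (layer_dims[1], n_examples) = (20, X.shape[1]).
assert Z.shape == (parameters['W1'].shape[0], X.shape[1])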
Z.shape | code |
50227807/cell_17 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames[:5]
def load_data(filenames):
i = 50
X = []
y = []
for name in filenames:
img = mpimg.imread(os.path.join('/kaggle/working/train', name))
X.append(cv2.resize(img, IMAGE_SIZE))
cat = name.split('.')[0]
if cat == 'dog':
y.append(0)
else:
y.append(1)
i -= 1
if i <= 0:
break
return (X, y)
def refine_data(X, y):
X = np.array(X)
X = X.reshape(X.shape[0], -1)
X = X.T
y = np.array(y)
y = y.reshape((1, y.shape[0]))
return (X, y)
X, y = load_data(filenames)  # read the images and labels before reshaping
X, y = refine_data(X, y)
layer_dims = [X.shape[0], 20, 7, 5, 1]
def initialize_parameters(layer_dims):
np.random.seed(3)
parameters = {}
L = len(layer_dims)
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
return parameters
parameters = initialize_parameters(layer_dims)
parameters
def linear_fwd(A, W, b):
Z = np.dot(W, A) + b
cache = (A, W, b)
return (Z, cache)
Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
Z.shape
def sigmoid(Z):
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return (A, cache)
def relu(Z):
    A = np.maximum(Z, 0)
    cache = Z
    return (A, cache)
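# Illustrative values: sigmoid(0) = 0.5, and relu zeroes out negatives.
print(relu(np.array([-1.0, 2.0]))[0])  # expected: [0. 2.]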
sigmoid(np.array([0, 2])) | code |
50227807/cell_10 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames[:5]
def load_data(filenames):
i = 50
X = []
y = []
for name in filenames:
img = mpimg.imread(os.path.join('/kaggle/working/train', name))
X.append(cv2.resize(img, IMAGE_SIZE))
cat = name.split('.')[0]
if cat == 'dog':
y.append(0)
else:
y.append(1)
i -= 1
if i <= 0:
break
return (X, y)
def refine_data(X, y):
X = np.array(X)
X = X.reshape(X.shape[0], -1)
X = X.T
y = np.array(y)
y = y.reshape((1, y.shape[0]))
return (X, y)
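# Shape note: each 128x128x3 image flattens to a 49152-long column, so X ends
# up (49152, n_examples) and y becomes a (1, n_examples) row vector.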
X, y = load_data(filenames)  # read the images and labels before reshaping
X, y = refine_data(X, y)
print(X.shape)
print(y.shape) | code |
50227807/cell_5 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
filenames = os.listdir('/kaggle/working/train')
filenames[:5] | code |
128027940/cell_6 | [
"text_plain_output_1.png"
] | !python -m spacy train /kaggle/working/config.cfg --output ./spacy_output --paths.train /kaggle/input/ir-silver-data/dev_data.spacy --paths.dev /kaggle/input/ir-silver-data/test_data.spacy --gpu-id 0 | code |
128027940/cell_1 | [
"text_plain_output_1.png"
] | !pip install spacy==3.4.4 | code |
128027940/cell_10 | [
"text_plain_output_1.png"
] | from spacy.tokens import DocBin
import spacy
train_bin = DocBin().from_disk('/kaggle/input/ir-silver-data/dev_data.spacy')
nlp = spacy.load('/kaggle/input/scispacy-model/en_core_sci_sm')
docs = train_bin.get_docs(nlp.vocab)
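# get_docs yields Doc objects lazily, deserializing each against nlp.vocab.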
for doc in docs:
for ent in doc.ents:
print(ent, ent.label_)
break | code |
128027940/cell_5 | [
"text_plain_output_1.png"
] | # Run in case of error
! python -m spacy debug data /kaggle/working/config.cfg --paths.train /kaggle/input/ir-silver-data/train_data.spacy --paths.dev /kaggle/input/ir-silver-data/dev_data.spacy | code |
33109829/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
print(sales_train_validation.shape)
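# Sanity check below (assuming the M5 split of 1437 FOODS, 1047 HOUSEHOLD and
# 565 HOBBIES items): 3049 items across 10 stores gives 30490 rows.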
print(10 * (1437 + 1047 + 565)) | code |
33109829/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
def plot_state_category(sales_train_validation, calendar_dates, state, category=None, start_time='2015'):
    sales_state_category = sales_train_validation.loc[sales_train_validation['state_id'] == state]
    if category is not None:
        sales_state_category = sales_state_category.loc[sales_state_category['cat_id'] == category]
    aggregate_ssc = sales_state_category.groupby(by=['dept_id'], axis=0).mean()
    aggregate_ssc.columns = calendar_dates['date']
    agg_ssc_trans = aggregate_ssc.transpose()
    plt.style.use('ggplot')
    plt.rcParams['figure.figsize'] = [25, 15]
    plt.rcParams['lines.linewidth'] = 2
    ax = agg_ssc_trans[start_time:].plot(title="Mean units sold per department - State: {}, Category: {}".format(state, category))
ax.set_ylabel('Units sold')
plt.show()
plot_state_category(sales_train_validation, calendar_stv, 'WI', category='FOODS', start_time='2013') | code |
33109829/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
def plot_state_category(sales_train_validation, calendar_dates, state, category=None, start_time='2015'):
    sales_state_category = sales_train_validation.loc[sales_train_validation['state_id'] == state]
    if category is not None:
        sales_state_category = sales_state_category.loc[sales_state_category['cat_id'] == category]
    aggregate_ssc = sales_state_category.groupby(by=['dept_id'], axis=0).mean()
    aggregate_ssc.columns = calendar_dates['date']
    agg_ssc_trans = aggregate_ssc.transpose()
    plt.style.use('ggplot')
    plt.rcParams['figure.figsize'] = [25, 15]
    plt.rcParams['lines.linewidth'] = 2
    ax = agg_ssc_trans[start_time:].plot(title="Mean units sold per department - State: {}, Category: {}".format(state, category))
ax.set_ylabel('Units sold')
plt.show()
plot_state_category(sales_train_validation, calendar_stv, 'CA', category='FOODS', start_time='2013') | code |
33109829/cell_19 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
agg_state_trans['WI']['2015':].plot(title='WI')
plt.show() | code |
33109829/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
import matplotlib.pyplot as plt | code |
33109829/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
sales_train_validation.groupby(['store_id'])['cat_id'].value_counts().plot(kind='bar')
plt.show() | code |
33109829/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
agg_state_trans['TX']['2015':].plot(title='TX')
plt.show() | code |
33109829/cell_28 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
def plot_state_category(sales_train_validation, calendar_dates, state, category=None, start_time='2015'):
    sales_state_category = sales_train_validation.loc[sales_train_validation['state_id'] == state]
    if category is not None:
        sales_state_category = sales_state_category.loc[sales_state_category['cat_id'] == category]
    aggregate_ssc = sales_state_category.groupby(by=['dept_id'], axis=0).mean()
    aggregate_ssc.columns = calendar_dates['date']
    agg_ssc_trans = aggregate_ssc.transpose()
    plt.style.use('ggplot')
    plt.rcParams['figure.figsize'] = [25, 15]
    plt.rcParams['lines.linewidth'] = 2
    ax = agg_ssc_trans[start_time:].plot(title="Mean units sold per department - State: {}, Category: {}".format(state, category))
ax.set_ylabel('Units sold')
plt.show()
light_sales = sales_train_validation.drop(['item_id', 'dept_id', 'cat_id', 'store_id'], axis=1)
light_sales = light_sales.groupby('state_id').mean()
light_sales.columns = calendar_stv['date']
light_s_t = light_sales.transpose()
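# After the transpose the index holds the parsed calendar dates, so the label
# slice below picks out the second half of December 2013.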
light_s_t['14-12-2013':'31-12-2013'].plot()
plt.show() | code |
33109829/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
calendar_stv.info() | code |
33109829/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
agg_state_trans['CA']['2015':].plot(title='CA')
plt.show() | code |
33109829/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
def plot_state_category(sales_train_validation, calendar_dates, state, category=None, start_time='2015'):
    sales_state_category = sales_train_validation.loc[sales_train_validation['state_id'] == state]
    if category is not None:
        sales_state_category = sales_state_category.loc[sales_state_category['cat_id'] == category]
    aggregate_ssc = sales_state_category.groupby(by=['dept_id'], axis=0).mean()
    aggregate_ssc.columns = calendar_dates['date']
    agg_ssc_trans = aggregate_ssc.transpose()
    plt.style.use('ggplot')
    plt.rcParams['figure.figsize'] = [25, 15]
    plt.rcParams['lines.linewidth'] = 2
    ax = agg_ssc_trans[start_time:].plot(title="Mean units sold per department - State: {}, Category: {}".format(state, category))
ax.set_ylabel('Units sold')
plt.show()
plot_state_category(sales_train_validation, calendar_stv, 'TX', category='FOODS', start_time='2013') | code |
33109829/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'], axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title='Average sales per state')
ax.set_ylabel('Units sold')
plt.show() | code |
33109829/cell_27 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title="Summed sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_mean = sales_train_validation.groupby(by=['state_id'],axis=0).mean()
aggregate_state_mean.columns = calendar_stv['date']
agg_state_mean_trans = aggregate_state_mean.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_mean_trans['2015':].plot(title="Average sales per state")
ax.set_ylabel('Units sold')
plt.show()
aggregate_state_category = sales_train_validation.groupby(by=['state_id', 'cat_id'], axis=0).sum()
aggregate_state_category.columns = calendar_stv['date']
agg_state_trans = aggregate_state_category.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 4
def plot_state_category(sales_train_validation, calendar_dates, state, category=None, start_time='2015'):
    sales_state_category = sales_train_validation.loc[sales_train_validation['state_id'] == state]
    if category is not None:
        sales_state_category = sales_state_category.loc[sales_state_category['cat_id'] == category]
    aggregate_ssc = sales_state_category.groupby(by=['dept_id'], axis=0).mean()
    aggregate_ssc.columns = calendar_dates['date']
    agg_ssc_trans = aggregate_ssc.transpose()
    plt.style.use('ggplot')
    plt.rcParams['figure.figsize'] = [25, 15]
    plt.rcParams['lines.linewidth'] = 2
    ax = agg_ssc_trans[start_time:].plot(title="Mean units sold per department - State: {}, Category: {}".format(state, category))
ax.set_ylabel('Units sold')
plt.show()
light_sales = sales_train_validation.drop(['item_id', 'dept_id', 'cat_id', 'store_id'], axis=1)
light_sales = light_sales.groupby('state_id').mean()
light_sales.columns = calendar_stv['date']
light_s_t = light_sales.transpose() | code |
33109829/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
aggregate_state_sum = sales_train_validation.groupby(by=['state_id'], axis=0).sum()
aggregate_state_sum.columns = calendar_stv['date']
agg_state_sum_trans = aggregate_state_sum.transpose()
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [20, 10]
plt.rcParams['lines.linewidth'] = 2
ax = agg_state_sum_trans['2015':].plot(title='Summed sales per state')
ax.set_ylabel('Units sold')
plt.show() | code |
33109829/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
calendar_df = pd.read_csv('../input/m5-forecasting-accuracy/calendar.csv', parse_dates=['date'], usecols=['date', 'd'])
calendar_stv = calendar_df[:1913]
sales_train_validation = pd.read_csv('../input/m5-forecasting-accuracy/sales_train_validation.csv', index_col='id')
sales_train_validation.head() | code |
2042802/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.boxplot(hr_attrition.Attrition, hr_attrition[i]) # Note the change in bottom level
plt.xlabel('')
numeric_columns = ['Age', 'DailyRate', 'DistanceFromHome', 'Education', 'EmployeeCount', 'EmployeeNumber', 'EnvironmentSatisfaction', 'HourlyRate', 'JobInvolvement', 'JobLevel', 'JobSatisfaction', 'MonthlyIncome', 'MonthlyRate', 'NumCompaniesWorked', 'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction', 'StandardHours', 'StockOptionLevel', 'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager']
corr = hr_attrition[numeric_columns].corr()
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(220, 10, as_cmap=True)
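# The boolean mask hides the upper triangle so each pairwise correlation is
# drawn only once in the heatmap.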
sns.heatmap(corr, mask=mask, cmap=cmap, center=0, yticklabels='auto', xticklabels=False, square=True, linewidths=0.5, annot=True, fmt='.1f') | code |
2042802/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears', 'YearsAtCompany', 'Education', 'StockOptionLevel']
fig, ax = plt.subplots(1, 1, figsize=(15, 15))
j = 0
title_Str = 'Plotting the count distributions of various numeric Features'
for i in numeric_columns:
j += 1
draw_subplots(i, title_Str, 3, 2, j, 20)
sns.countplot(hr_attrition[i], hue=hr_attrition['Attrition'])
plt.xlabel('') | code |
2042802/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.boxplot(hr_attrition.Attrition, hr_attrition[i]) # Note the change in bottom level
plt.xlabel('')
numeric_columns = ['Age','DailyRate','DistanceFromHome','Education','EmployeeCount','EmployeeNumber',
'EnvironmentSatisfaction','HourlyRate','JobInvolvement','JobLevel','JobSatisfaction',
'MonthlyIncome','MonthlyRate','NumCompaniesWorked','PercentSalaryHike','PerformanceRating',
'RelationshipSatisfaction','StandardHours','StockOptionLevel','TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole',
'YearsSinceLastPromotion','YearsWithCurrManager']
# Site :: http://seaborn.pydata.org/examples/many_pairwise_correlations.html
# Compute the correlation matrix
corr=hr_attrition[numeric_columns].corr()
fig,ax = plt.subplots(1,1, figsize=(20,20))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.triu_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,yticklabels="auto",xticklabels=False,
square=True, linewidths=.5,annot=True, fmt= '.1f')
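# Compatibility note: seaborn renamed factorplot to catplot in v0.9; on newer
# seaborn the call below would be sns.catplot with the same arguments.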
sns.factorplot(x='JobSatisfaction', y='MonthlyIncome', hue='Attrition', col='Education', col_wrap=2, kind='box', data=hr_attrition) | code |
2042802/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsWithCurrManager']
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
j = 0
title_Str = 'Plotting the Boxplot distribution of various numeric Features'
for i in numeric_columns:
j += 1
draw_subplots(i, title_Str, 3, 2, j, 20)
sns.boxplot(hr_attrition.Attrition, hr_attrition[i])
plt.xlabel('') | code |
2042802/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
def draw_subplots(var_Name, tittle_Name, nrow=1, ncol=1, idx=1, fz=10):
ax = plt.subplot(nrow, ncol, idx)
ax.set_title('Distribution of ' + var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
j = 0
title_Str = 'Plotting the density distribution of various numeric Features'
for i in numeric_columns:
j += 1
draw_subplots(i, title_Str, 3, 1, j, 20)
sns.distplot(hr_attrition[i])
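    # distplot is deprecated in seaborn >= 0.11; histplot/displot replace it.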
plt.xlabel('') | code |
2042802/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.boxplot(hr_attrition.Attrition, hr_attrition[i]) # Note the change in bottom level
plt.xlabel('')
numeric_columns = ['Age','DailyRate','DistanceFromHome','Education','EmployeeCount','EmployeeNumber',
'EnvironmentSatisfaction','HourlyRate','JobInvolvement','JobLevel','JobSatisfaction',
'MonthlyIncome','MonthlyRate','NumCompaniesWorked','PercentSalaryHike','PerformanceRating',
'RelationshipSatisfaction','StandardHours','StockOptionLevel','TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole',
'YearsSinceLastPromotion','YearsWithCurrManager']
# Site :: http://seaborn.pydata.org/examples/many_pairwise_correlations.html
# Compute the correlation matrix
corr=hr_attrition[numeric_columns].corr()
fig,ax = plt.subplots(1,1, figsize=(20,20))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.triu_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,yticklabels="auto",xticklabels=False,
square=True, linewidths=.5,annot=True, fmt= '.1f')
sns.factorplot(x='Department', y='Education', hue='Attrition', col='BusinessTravel', row='OverTime', kind='box', data=hr_attrition) | code |
2042802/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.boxplot(hr_attrition.Attrition, hr_attrition[i]) # Note the change in bottom level
plt.xlabel('')
numeric_columns = ['Age','DailyRate','DistanceFromHome','Education','EmployeeCount','EmployeeNumber',
'EnvironmentSatisfaction','HourlyRate','JobInvolvement','JobLevel','JobSatisfaction',
'MonthlyIncome','MonthlyRate','NumCompaniesWorked','PercentSalaryHike','PerformanceRating',
'RelationshipSatisfaction','StandardHours','StockOptionLevel','TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole',
'YearsSinceLastPromotion','YearsWithCurrManager']
# Site :: http://seaborn.pydata.org/examples/many_pairwise_correlations.html
# Compute the correlation matrix
corr=hr_attrition[numeric_columns].corr()
fig,ax = plt.subplots(1,1, figsize=(20,20))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.triu_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,yticklabels="auto",xticklabels=False,
square=True, linewidths=.5,annot=True, fmt= '.1f')
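# Compatibility note: newer seaborn expects keyword arguments here, i.e.
# sns.kdeplot(x=..., y=...); positional x/y pairs are deprecated since v0.11.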
sns.kdeplot(hr_attrition.JobSatisfaction, hr_attrition.JobInvolvement) | code |
2042802/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.boxplot(hr_attrition.Attrition, hr_attrition[i]) # Note the change in bottom level
plt.xlabel('')
numeric_columns = ['Age','DailyRate','DistanceFromHome','Education','EmployeeCount','EmployeeNumber',
'EnvironmentSatisfaction','HourlyRate','JobInvolvement','JobLevel','JobSatisfaction',
'MonthlyIncome','MonthlyRate','NumCompaniesWorked','PercentSalaryHike','PerformanceRating',
'RelationshipSatisfaction','StandardHours','StockOptionLevel','TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole',
'YearsSinceLastPromotion','YearsWithCurrManager']
# Site :: http://seaborn.pydata.org/examples/many_pairwise_correlations.html
# Compute the correlation matrix
corr=hr_attrition[numeric_columns].corr()
fig,ax = plt.subplots(1,1, figsize=(20,20))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.triu_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,yticklabels="auto",xticklabels=False,
square=True, linewidths=.5,annot=True, fmt= '.1f')
sns.kdeplot(hr_attrition.Education, hr_attrition.EnvironmentSatisfaction) | code |
2042802/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values | code |
2042802/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
    draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the distribution plots
sns.distplot(hr_attrition[i])
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.countplot(hr_attrition[i],hue=hr_attrition["Attrition"])
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 subplots for plotting distribution plots
sns.boxplot(hr_attrition.Attrition, hr_attrition[i]) # Note the change in bottom level
plt.xlabel('')
numeric_columns = ['Age','DailyRate','DistanceFromHome','Education','EmployeeCount','EmployeeNumber',
'EnvironmentSatisfaction','HourlyRate','JobInvolvement','JobLevel','JobSatisfaction',
'MonthlyIncome','MonthlyRate','NumCompaniesWorked','PercentSalaryHike','PerformanceRating',
'RelationshipSatisfaction','StandardHours','StockOptionLevel','TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole',
'YearsSinceLastPromotion','YearsWithCurrManager']
# Site :: http://seaborn.pydata.org/examples/many_pairwise_correlations.html
# Compute the correlation matrix
corr=hr_attrition[numeric_columns].corr()
fig,ax = plt.subplots(1,1, figsize=(20,20))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.triu_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,yticklabels="auto",xticklabels=False,
square=True, linewidths=.5,annot=True, fmt= '.1f')
sns.factorplot(x='WorkLifeBalance', y='JobRole', hue='Attrition', col='PerformanceRating', col_wrap=2, kind='box', data=hr_attrition) | code |
2042802/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
###########################################################################################
# Define a common module for drawing subplots
# This module will draw a subplot based on the parameters
# There will be multiple subplots within the main plotting window
# Definition of the parameters:
# var_Name - the variable name from the data file
# tittle_Name - the title given for the plot
# nrow & ncol - the number of subplots within the main plotting window
# idx - position of the subplot in the main plotting window
# fz - the font size of the title in the main plotting window
##########################################################################################
def draw_subplots(var_Name,tittle_Name,nrow=1,ncol=1,idx=1,fz=10):
ax = plt.subplot(nrow,ncol,idx)
ax.set_title('Distribution of '+var_Name)
plt.suptitle(tittle_Name, fontsize=fz)
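# Hypothetical usage sketch (added for illustration, kept commented out so it does
# not add a stray axis to the figures below): one plot at slot 1 of a 2x2 grid.
# draw_subplots('Age', 'Demo title', nrow=2, ncol=2, idx=1, fz=14)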
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the density distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,1,j,20) # create a 3x1 grid of subplots for the density plots
sns.distplot(hr_attrition[i])  # distplot is deprecated in newer seaborn; histplot(..., kde=True) is a close modern equivalent
plt.xlabel('')
numeric_columns = ['Age', 'DistanceFromHome', 'TotalWorkingYears',
'YearsAtCompany', 'Education','StockOptionLevel']
fig,ax = plt.subplots(1,1, figsize=(15,15))
j=0 # reset the counter to plot
title_Str="Plotting the count distributions of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 grid of subplots for the count plots
sns.countplot(x=hr_attrition[i], hue=hr_attrition["Attrition"])  # keyword arguments required by newer seaborn
plt.xlabel('')
numeric_columns = ['Age', 'MonthlyIncome', 'TotalWorkingYears',
'YearsAtCompany', 'YearsInCurrentRole','YearsWithCurrManager']
fig,ax = plt.subplots(1,1, figsize=(10,10))
j=0 # reset the counter to plot
title_Str="Plotting the Boxplot distribution of various numeric Features"
for i in numeric_columns:
j +=1
draw_subplots(i,title_Str,3,2,j,20) # create a 3x2 grid of subplots for the boxplots
sns.boxplot(x=hr_attrition.Attrition, y=hr_attrition[i]) # each numeric feature split by Attrition; keyword args required by newer seaborn
plt.xlabel('')
numeric_columns = ['Age','DailyRate','DistanceFromHome','Education','EmployeeCount','EmployeeNumber',
'EnvironmentSatisfaction','HourlyRate','JobInvolvement','JobLevel','JobSatisfaction',
'MonthlyIncome','MonthlyRate','NumCompaniesWorked','PercentSalaryHike','PerformanceRating',
'RelationshipSatisfaction','StandardHours','StockOptionLevel','TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance','YearsAtCompany','YearsInCurrentRole',
'YearsSinceLastPromotion','YearsWithCurrManager']
# Site :: http://seaborn.pydata.org/examples/many_pairwise_correlations.html
# Compute the correlation matrix
corr=hr_attrition[numeric_columns].corr()
fig,ax = plt.subplots(1,1, figsize=(20,20))
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed from NumPy; the builtin bool is equivalent
mask[np.triu_indices_from(mask)] = True
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,yticklabels="auto",xticklabels=False,
square=True, linewidths=.5,annot=True, fmt= '.1f')
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
role_counts = hr_attrition['JobRole'].value_counts()  # counts per role, sorted descending
labels = role_counts.index  # take the labels from the counts so the two stay aligned
jr_array = role_counts.values  # unique() order differs from value_counts() order, which mismatched labels and slices
plt.pie(jr_array, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
plt.title('Job Role Pie Chart', fontsize=20)
plt.show() | code |
2042802/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
hr_attrition = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv', header=0)
hr_attrition.columns.values
hr_attrition.info() | code |
74067375/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
cars = {'Brand': ['Honda Civic', 'Toyota Corolla', 'Ford Focus', 'Audi A4'], 'Price': [22000, 25000, 27000, 35000], 'Year': [2015, 2013, 2018, 2018]}
df = pd.DataFrame(cars, columns=['Brand', 'Price', 'Year'])
df.sort_values(by=['Brand'], inplace=True)
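# Illustrative variation (not in the original cell): the same sort without
# mutating in place, returning a new frame ordered by Price, highest first.
df_by_price = df.sort_values(by=['Price'], ascending=False)
print(df_by_price)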
df.head() | code |
74067375/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
cars = {'Brand': ['Honda Civic', 'Toyota Corolla', 'Ford Focus', 'Audi A4'], 'Price': [22000, 25000, 27000, 35000], 'Year': [2015, 2013, 2018, 2018]}
df = pd.DataFrame(cars, columns=['Brand', 'Price', 'Year'])
df.sort_values(by=['Brand'], inplace=True)
import pandas as pd
data = {'Product': ['ABC', 'XYZ'], 'Price': ['250', '270']}
df = pd.DataFrame(data)
print(df)
print(df.dtypes) | code |
74067375/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
cars = {'Brand': ['Honda Civic', 'Toyota Corolla', 'Ford Focus', 'Audi A4'], 'Price': [22000, 25000, 27000, 35000], 'Year': [2015, 2013, 2018, 2018]}
df = pd.DataFrame(cars, columns=['Brand', 'Price', 'Year'])
df.sort_values(by=['Brand'], inplace=True)
import pandas as pd
data = {'Product': ['ABC', 'XYZ'], 'Price': ['250', '270']}
df = pd.DataFrame(data)
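# Alternative sketch (added for illustration): pd.to_numeric also converts the
# string prices, and errors='coerce' turns unparseable values into NaN instead
# of raising.
df['Price'] = pd.to_numeric(df['Price'], errors='coerce')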
df['Price'] = df['Price'].astype(float)  # convert the string prices to floats; 'Price' is the only string-valued column in this frame | code