path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
104124784/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
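# combine train and test so imputation and encoding are applied consistently to both; identifiers and the label are dropped next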
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.describe() | code |
104124784/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.head() | code |
104124784/cell_34 | [
"image_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
df.isnull().sum()
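# one-hot encode the two categorical columns; sparse=False (renamed sparse_output in scikit-learn >= 1.2) returns a dense array, and handle_unknown='ignore' zero-fills categories unseen at fit time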
ohe = OneHotEncoder(sparse=False, handle_unknown='ignore')
transformed_f = ohe.fit_transform(df[['Sex', 'Embarked']])
transformed_f = pd.DataFrame(transformed_f, columns=['Sex0', 'Sex1', 'Embarked0', 'Embarked1', 'Embarked2'])
df = df.join(transformed_f)
df = df.drop(['Sex', 'Embarked'], axis=1)
train_final = df.loc[:train.index.max(), :].copy()
# after reset_index the test rows follow the train rows, so test must be sliced from the end of train onward
test_final = df.loc[train.index.max() + 1:, :].copy()
print(train_final.shape)
print(test_final.shape) | code |
104124784/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
df.isnull().sum()
df.head() | code |
104124784/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df.head() | code |
104124784/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
print('train', train.shape)
print('test', test.shape) | code |
104124784/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0] | code |
104124784/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.head() | code |
104124784/cell_18 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
sns.histplot(data=df['Fare'], color='teal', kde=True)
plt.show() | code |
104124784/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
df.isnull().sum()
ohe = OneHotEncoder(sparse=False, handle_unknown='ignore')
transformed_f = ohe.fit_transform(df[['Sex', 'Embarked']])
transformed_f = pd.DataFrame(transformed_f, columns=['Sex0', 'Sex1', 'Embarked0', 'Embarked1', 'Embarked2'])
df = df.join(transformed_f)
df = df.drop(['Sex', 'Embarked'], axis=1)
print(df.shape)
print(train.shape)
print(test.shape) | code |
104124784/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.info() | code |
104124784/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
df['Embarked'].mode() | code |
104124784/cell_3 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.head() | code |
104124784/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
df = pd.concat([train, test], axis=0).reset_index(drop=True)
df = df.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
df.isnull().sum().sort_values(ascending=False) * 100 / df.shape[0]
df.isnull().sum()
ohe = OneHotEncoder(sparse=False, handle_unknown='ignore')
transformed_f = ohe.fit_transform(df[['Sex', 'Embarked']])
transformed_f = pd.DataFrame(transformed_f, columns=['Sex0', 'Sex1', 'Embarked0', 'Embarked1', 'Embarked2'])
df = df.join(transformed_f)
df = df.drop(['Sex', 'Embarked'], axis=1)
df.head() | code |
34144131/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import os
import cv2
import json
import time
import random
import numpy as np
import mxnet as mx
import pandas as pd
import gluoncv as gcv
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool
def load_dataset(root):
csv = pd.read_csv(os.path.join(root, 'train.csv'))
data = {}
for i in csv.index:
key = csv['image_id'][i]
bbox = json.loads(csv['bbox'][i])
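# bbox is COCO-style [x, y, width, height]; convert to [x1, y1, x2, y2] corners and append class id 0 (wheat)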
bbox = [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3], 0.0]
if key in data:
data[key].append(bbox)
else:
data[key] = [bbox]
return sorted([(k, os.path.join(root, 'train', k + '.jpg'), v) for k, v in data.items()], key=lambda x: x[0])
def load_image(path):
with open(path, 'rb') as f:
buf = f.read()
return mx.image.imdecode(buf)
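# batch generator: a thread pool decodes and transforms samples, then fixed-shape arrays are stacked and the variable-length box lists padded with -1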
def get_batches(dataset, batch_size, width=512, height=512, net=None, ctx=mx.cpu()):
batches = len(dataset) // batch_size
if batches * batch_size < len(dataset):
batches += 1
sampler = Sampler(width, height, net)
with Pool(cpu_count() * 2) as p:
for i in range(batches):
start = i * batch_size
samples = p.map(sampler, dataset[start:start + batch_size])
stack_fn = [gcv.data.batchify.Stack()]
pad_fn = [gcv.data.batchify.Pad(pad_val=-1)]
if net is None:
batch = gcv.data.batchify.Tuple(*stack_fn + pad_fn)(samples)
else:
batch = gcv.data.batchify.Tuple(*stack_fn * 6 + pad_fn)(samples)
yield [x.as_in_context(ctx) for x in batch]
def gauss_blur(image, level):
return cv2.blur(image, (level * 2 + 1, level * 2 + 1))
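# add random per-channel Gaussian noise, scaled by each channel's headroom so pixel values stay within uint8 range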
def gauss_noise(image):
for i in range(image.shape[2]):
c = image[:, :, i]
diff = 255 - c.max()
noise = np.random.normal(0, random.randint(1, 6), c.shape)
noise = (noise - noise.min()) / (noise.max() - noise.min())
noise = diff * noise
image[:, :, i] = c + noise.astype(np.uint8)
return image
class Sampler:
def __init__(self, width, height, net=None, **kwargs):
self._net = net
if net is None:
self._transform = gcv.data.transforms.presets.yolo.YOLO3DefaultValTransform(width, height, **kwargs)
else:
self._transform = gcv.data.transforms.presets.yolo.YOLO3DefaultTrainTransform(width, height, net=net, **kwargs)
def __call__(self, data):
raw = load_image(data[1])
bboxes = np.array(data[2])
if self._net is not None:
raw = raw.asnumpy()
blur = random.randint(0, 3)
if blur > 0:
raw = gauss_blur(raw, blur)
raw = gauss_noise(raw)
raw = mx.nd.array(raw)
h, w, _ = raw.shape
raw, flips = gcv.data.transforms.image.random_flip(raw, py=0.5)
bboxes = gcv.data.transforms.bbox.flip(bboxes, (w, h), flip_y=flips[1])
res = self._transform(raw, bboxes)
return [mx.nd.array(x) for x in res]
def load_model(path, ctx=mx.cpu()):
net = gcv.model_zoo.yolo3_darknet53_custom(['wheat'], pretrained_base=False)
net.set_nms(post_nms=150)
net.load_parameters(path, ctx=ctx)
return net
max_epochs = 4
learning_rate = 0.001
batch_size = 16
img_s = 512
threshold = 0.1
context = mx.gpu()
print('Loading model...')
model = load_model('/kaggle/input/global-wheat-detection-models/global-wheat-yolo3-darknet53.params', ctx=context)
print('Loading test images...')
test_images = [(os.path.join(dirname, filename), os.path.splitext(filename)[0]) for dirname, _, filenames in os.walk('/kaggle/input/global-wheat-detection/test') for filename in filenames]
print('Pseudo labeling...')
pseudo_set = []
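# run the pretrained detector on each test image and keep boxes scoring above the threshold as pseudo-labels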
for path, image_id in test_images:
print(path)
raw = load_image(path)
x, _ = gcv.data.transforms.presets.yolo.transform_test(raw, short=img_s)
classes, scores, bboxes = model(x.as_in_context(context))
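# predictions live in the resized network input space; rescale x coordinates by the original width and y by the original height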
bboxes[0, :, 0::2] = (bboxes[0, :, 0::2] / x.shape[3]).clip(0.0, 1.0) * raw.shape[1]
bboxes[0, :, 1::2] = (bboxes[0, :, 1::2] / x.shape[2]).clip(0.0, 1.0) * raw.shape[0]
pseudo_set.append((image_id, path, [[round(x) for x in bboxes[0, i].asnumpy().tolist()] + [0.0] for i in range(classes.shape[1]) if model.classes[int(classes[0, i].asscalar())] == 'wheat' and scores[0, i].asscalar() > threshold]))
print('Loading training set...')
training_set = load_dataset('/kaggle/input/global-wheat-detection') + pseudo_set
print('Re-training...')
trainer = mx.gluon.Trainer(model.collect_params(), 'Nadam', {'learning_rate': learning_rate})
for epoch in range(max_epochs):
ts = time.time()
random.shuffle(training_set)
training_total_L = 0.0
training_batches = 0
for x, objectness, center_targets, scale_targets, weights, class_targets, gt_bboxes in get_batches(training_set, batch_size, width=img_s, height=img_s, net=model, ctx=context):
training_batches += 1
with mx.autograd.record():
obj_loss, center_loss, scale_loss, cls_loss = model(x, gt_bboxes, objectness, center_targets, scale_targets, weights, class_targets)
L = obj_loss + center_loss + scale_loss + cls_loss
L.backward()
trainer.step(x.shape[0])
training_batch_L = mx.nd.mean(L).asscalar()
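# NaN != NaN, so the next check aborts training as soon as the loss diverges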
if training_batch_L != training_batch_L:
raise ValueError()
training_total_L += training_batch_L
print('[Epoch %d Batch %d] batch_loss %.10f average_loss %.10f elapsed %.2fs' % (epoch, training_batches, training_batch_L, training_total_L / training_batches, time.time() - ts))
training_avg_L = training_total_L / training_batches
print('[Epoch %d] training_loss %.10f duration %.2fs' % (epoch + 1, training_avg_L, time.time() - ts))
print('Inference...')
results = []
for path, image_id in test_images:
print(path)
raw = load_image(path)
x, _ = gcv.data.transforms.presets.yolo.transform_test(raw, short=img_s)
classes, scores, bboxes = model(x.as_in_context(context))
bboxes[0, :, 0::2] = (bboxes[0, :, 0::2] / x.shape[3]).clip(0.0, 1.0) * raw.shape[1]
bboxes[0, :, 1::2] = (bboxes[0, :, 1::2] / x.shape[2]).clip(0.0, 1.0) * raw.shape[0]
bboxes[0, :, 2:4] -= bboxes[0, :, 0:2]
results.append({'image_id': image_id, 'PredictionString': ' '.join([' '.join([str(x) for x in [scores[0, i].asscalar()] + [round(x) for x in bboxes[0, i].asnumpy().tolist()]]) for i in range(classes.shape[1]) if model.classes[int(classes[0, i].asscalar())] == 'wheat' and scores[0, i].asscalar() > threshold])})
pd.DataFrame(results, columns=['image_id', 'PredictionString']).to_csv('submission.csv', index=False) | code |
73070153/cell_21 | [
"text_plain_output_1.png"
] | from gensim.corpora import Dictionary
from gensim.models import TfidfModel, LsiModel, Word2Vec
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import re
stop_words = stopwords.words('english')
def clean_text(txt: str):
"""Clean and lower case text."""
txt = re.sub('[^A-Za-z0-9]+', ' ', str(txt).lower())
txt = re.sub('\\b\\d+\\b', '', txt).strip()
return txt
def tokenizer(txt: str):
"""Custom tokenizer."""
tokens = []
for sent in sent_tokenize(txt, language='english'):
for word in word_tokenize(clean_text(sent), language='english'):
if len(word) < 2:
continue
if word in stop_words:
continue
tokens.append(word)
return tokens
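# 'data' (the list of raw documents) is loaded in an earlier notebook cell that is not part of this row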
tokenized_data = [tokenizer(doc) for doc in data]
dct = Dictionary(tokenized_data)
rare_ids = [tokenid for tokenid, wordfreq in dct.cfs.items() if wordfreq < 2]
dct.filter_tokens(rare_ids)
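# convert each tokenized document to a bag-of-words vector, then reweight the corpus by TF-IDF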
corpus = [dct.doc2bow(line) for line in tokenized_data]
tfidf_model = TfidfModel(corpus, id2word=dct)
tfidf_matrix = tfidf_model[corpus]
print('Size of LSI vocab.:', len(dct.keys()))
print('Size of w2v vocab.:', len(w2v_model.wv.key_to_index.keys())) | code |
73070153/cell_44 | [
"text_plain_output_1.png"
] | w2v_model = Word2Vec(vector_size=dim_w2v, alpha=alpha, min_alpha=alpha_min, window=wnd, min_count=mincount, sample=sample, sg=sg, negative=ngt, workers=threads)
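# the hyperparameters (dim_w2v, alpha, wnd, ...) and corpus_w2v are defined in earlier notebook cells that are not part of this row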
word_freq = {dct[k]: v for k, v in dct.cfs.items()}
w2v_model.build_vocab_from_freq(word_freq)
num_samples = dct.num_docs
w2v_model.train(corpus_w2v, total_examples=num_samples, epochs=epochs) | code |
73070153/cell_39 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | lsi_model = LsiModel(corpus=tfidf_model[corpus], id2word=dct, num_topics=dim_lsi) | code |
73070153/cell_26 | [
"text_plain_output_1.png"
] | word_freq = {dct[k]: v for k, v in dct.cfs.items()}
w2v_model.build_vocab_from_freq(word_freq)
num_samples = dct.num_docs
w2v_model.train(tokenized_data, total_examples=num_samples, epochs=epochs) | code |
73070153/cell_19 | [
"text_plain_output_1.png"
] | w2v_model = Word2Vec(sentences=tokenized_data, vector_size=dim_w2v, alpha=alpha, min_alpha=alpha_min, window=wnd, min_count=mincount, sample=sample, sg=sg, negative=ngt, epochs=epochs, workers=threads) | code |
73070153/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import nltk
nltk.download('stopwords')
nltk.download('punkt') | code |
73070153/cell_15 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | dim_lsi = 200
lsi_model = LsiModel(corpus=tfidf_matrix, id2word=dct, num_topics=dim_lsi) | code |
16129109/cell_2 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import os
from PIL import Image
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from keras.models import Sequential, Model
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.utils import to_categorical | code |
16129109/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
import numpy as np
import os
import pandas as pd
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
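# average the three RGB channels into one grayscale value per pixel, then flatten to a single 1024-feature row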
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
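# NOTE: DataFrame.append was removed in pandas 2.0; collecting rows and calling pd.concat once is the modern equivalent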
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
train = train.append(image, ignore_index=True)
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
test = test.append(image, ignore_index=True)
test_id.append(file)
print('TRAIN---------------------')
print('Shape: {}'.format(train.shape))
print('Label 0 (False): {}'.format(np.sum(labels_train.has_cactus == 0)))
print('Label 1 (True): {}'.format(np.sum(labels_train.has_cactus == 1)))
print('TEST----------------------')
print('Shape: {}'.format(test.shape)) | code |
16129109/cell_15 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.models import Sequential, Model
from keras.utils import to_categorical
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
train = train.append(image, ignore_index=True)
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
test = test.append(image, ignore_index=True)
test_id.append(file)
random.seed(0)
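# random.choices samples with replacement, so idx can hold duplicates and X_train fewer than 10000 unique rows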
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
model = RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
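# NOTE: categorical_crossentropy is usually paired with a softmax output; with sigmoid the two class probabilities are not constrained to sum to 1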
model = Sequential()
model.add(Dense(5, activation='sigmoid', input_shape=(1024,)))
model.add(Dense(10, activation='sigmoid'))
model.add(Dense(2, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, to_categorical(y_train), epochs=10) | code |
16129109/cell_17 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.models import Sequential, Model
from keras.utils import to_categorical
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
train = train.append(image, ignore_index=True)
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
test = test.append(image, ignore_index=True)
test_id.append(file)
random.seed(0)
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
model = RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
model = Sequential()
model.add(Dense(5, activation='sigmoid', input_shape=(1024,)))
model.add(Dense(10, activation='sigmoid'))
model.add(Dense(2, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, to_categorical(y_train), epochs=10)
model = Sequential()
model.add(Conv1D(filters=4, kernel_size=4, input_shape=(32, 32)))
model.add(MaxPooling1D(pool_size=4))
model.add(Flatten())
model.add(Dense(2, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(np.array(X_train).reshape((10000, 32, 32)), to_categorical(y_train), epochs=10) | code |
16129109/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
train = train.append(image, ignore_index=True)
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
test = test.append(image, ignore_index=True)
test_id.append(file)
random.seed(0)
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test) | code |
16129109/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import pandas as pd
import random
def get_pixel_data(filepath):
"""
Get the pixel data from an image as a pandas DataFrame.
"""
image = Image.open(filepath)
pixel_data = np.array(image.getdata())
pixel_data = pixel_data.mean(axis=1)
pixel_data = pixel_data.reshape(1, 32 * 32)
pixel_data = pd.DataFrame(pixel_data, columns=np.arange(32 * 32))
image.close()
return pixel_data
path = '../input/train/train/'
train = pd.DataFrame()
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
train = train.append(image, ignore_index=True)
labels_train = pd.read_csv('../input/train.csv').sort_values('id')
path = '../input/test/test/'
test = pd.DataFrame()
test_id = []
for file in sorted(os.listdir(path)):
image = get_pixel_data(path + file)
test = test.append(image, ignore_index=True)
test_id.append(file)
random.seed(0)
idx = random.choices(range(17500), k=10000)
X_train = train.iloc[idx]
X_test = train.drop(idx, axis=0)
y_train = labels_train.iloc[idx, 1]
y_test = labels_train.drop(idx, axis=0).iloc[:, 1]
model = LogisticRegression(solver='lbfgs', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test)
model = RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=0)
model.fit(X_train, y_train)
model.score(X_test, y_test) | code |
128017800/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
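# convert duration from milliseconds to minutes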
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
sadSet = track_csv[track_csv['track_genre'] == 'sad']
pagodeSet = track_csv[track_csv['track_genre'] == 'pagode']
metalSet = track_csv[track_csv['track_genre'] == 'metal']
sns.relplot(data=metalSet, x='energy', y='liveness') | code |
128017800/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
sadSet = track_csv[track_csv['track_genre'] == 'sad']
sns.relplot(data=sadSet, x='duration', y='popularity', hue='key') | code |
128017800/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByGenre.head(20) | code |
128017800/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
sadSet = track_csv[track_csv['track_genre'] == 'sad']
pagodeSet = track_csv[track_csv['track_genre'] == 'pagode']
sns.countplot(data=pagodeSet, x='mode') | code |
128017800/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128017800/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
popularityByArtist.head(20) | code |
128017800/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
metallicaSet = track_csv[track_csv['artists'] == 'Metallica']
metallicaSet.shape[0] | code |
128017800/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
sadSet = track_csv[track_csv['track_genre'] == 'sad']
pagodeSet = track_csv[track_csv['track_genre'] == 'pagode']
metalSet = track_csv[track_csv['track_genre'] == 'metal']
metallicaSet = track_csv[track_csv['artists'] == 'Metallica']
metallicaSet.shape[0]
metallicaAlbumSorted = metallicaSet.groupby(metallicaSet['album_name'])['popularity'].mean().sort_values(ascending=False).reset_index()
sns.barplot(data=metallicaAlbumSorted, x='popularity', y='album_name') | code |
128017800/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
track_csv.head() | code |
128017800/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
popularityByGenre = track_csv.groupby([track_csv['track_genre']])['popularity'].mean().sort_values(ascending=False)
popularityByArtist = track_csv.groupby(track_csv['artists'])['popularity'].mean().sort_values(ascending=False)
sadSet = track_csv[track_csv['track_genre'] == 'sad']
pagodeSet = track_csv[track_csv['track_genre'] == 'pagode']
metalSet = track_csv[track_csv['track_genre'] == 'metal']
metallicaSet = track_csv[track_csv['artists'] == 'Metallica']
metallicaSet.shape[0]
metallicaAlbumSorted = metallicaSet.groupby(metallicaSet['album_name'])['popularity'].mean().sort_values(ascending=False).reset_index()
sns.countplot(data=metallicaSet, x='key') | code |
128017800/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
track_csv = pd.read_table('/kaggle/input/-spotify-tracks-dataset/dataset.csv', sep=',')
track_csv = track_csv.rename(columns={'duration_ms': 'duration'})
track_csv['duration'] = track_csv['duration'] / 60000
track_csv['time_signature'].value_counts() | code |
73096770/cell_9 | [
"image_output_1.png"
] | import os
import pandas as pd
product_name_dictionary = {}
product_name_dictionary2 = {}
for dirname, _, filenames in os.walk('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/'):
for filename in filenames:
engagement_data_path = os.path.join(dirname, filename)
df_temp = pd.read_csv(engagement_data_path, dtype={'lp_id': str})
df_temp = df_temp.fillna(0)
df_temp = df_temp.groupby(by='lp_id', as_index=False).sum()
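# accumulate each district file's per-product sums into the global totals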
for index, row in df_temp.iterrows():
if row['lp_id'] in product_name_dictionary:
product_name_dictionary[row['lp_id']] += row['pct_access']
product_name_dictionary2[row['lp_id']] += row['engagement_index']
else:
product_name_dictionary[row['lp_id']] = row['pct_access']
product_name_dictionary2[row['lp_id']] = row['engagement_index']
def dict_val(x):
return x[1]
sorted_product_name_dictionary = sorted(product_name_dictionary.items(), key=dict_val, reverse=True)
product_df = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv', dtype={'LP ID': str})
i = 1
name_count = []
for key, val in sorted_product_name_dictionary[:11]:
product_name = product_df[product_df['LP ID'] == key]['Product Name'].values
if len(product_name) > 0:  # comparing an ndarray to None is unreliable; check emptiness instead
name_count.append((product_name[0], val))
print(str(i) + ' :' + product_name[0])
i += 1 | code |
73096770/cell_11 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
product_name_dictionary = {}
product_name_dictionary2 = {}
for dirname, _, filenames in os.walk('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/'):
for filename in filenames:
engagement_data_path = os.path.join(dirname, filename)
df_temp = pd.read_csv(engagement_data_path, dtype={'lp_id': str})
df_temp = df_temp.fillna(0)
df_temp = df_temp.groupby(by='lp_id', as_index=False).sum()
for index, row in df_temp.iterrows():
if row['lp_id'] in product_name_dictionary:
product_name_dictionary[row['lp_id']] += row['pct_access']
product_name_dictionary2[row['lp_id']] += row['engagement_index']
else:
product_name_dictionary[row['lp_id']] = row['pct_access']
product_name_dictionary2[row['lp_id']] = row['engagement_index']
def dict_val(x):
return x[1]
sorted_product_name_dictionary = sorted(product_name_dictionary.items(), key=dict_val, reverse=True)
product_df = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv', dtype={'LP ID': str})
i = 1
name_count = []
for key, val in sorted_product_name_dictionary[:11]:
product_name = product_df[product_df['LP ID'] == key]['Product Name'].values
if len(product_name) > 0:
name_count.append((product_name[0], val))
i += 1
name_count1 = pd.DataFrame([])
name_count1['products'] = [name[0] for name in name_count]
name_count1['percentage_of_access'] = [name[1] for name in name_count]
plt.figure(figsize=(20, 10))
plt.bar(name_count1['products'].values, name_count1['percentage_of_access'].values, color=['orange', 'blue'])
plt.xlabel('Top Product name')
plt.ylabel('Sum of percentage of access')
plt.show() | code |
73096770/cell_16 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
product_name_dictionary = {}
product_name_dictionary2 = {}
for dirname, _, filenames in os.walk('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/'):
for filename in filenames:
engagement_data_path = os.path.join(dirname, filename)
df_temp = pd.read_csv(engagement_data_path, dtype={'lp_id': str})
df_temp = df_temp.fillna(0)
df_temp = df_temp.groupby(by='lp_id', as_index=False).sum()
for index, row in df_temp.iterrows():
if row['lp_id'] in product_name_dictionary:
product_name_dictionary[row['lp_id']] += row['pct_access']
product_name_dictionary2[row['lp_id']] += row['engagement_index']
else:
product_name_dictionary[row['lp_id']] = row['pct_access']
product_name_dictionary2[row['lp_id']] = row['engagement_index']
def dict_val(x):
return x[1]
sorted_product_name_dictionary = sorted(product_name_dictionary.items(), key=dict_val, reverse=True)
product_df = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv', dtype={'LP ID': str})
i = 1
name_count = []
for key, val in sorted_product_name_dictionary[:11]:
product_name = product_df[product_df['LP ID'] == key]['Product Name'].values
if len(product_name) > 0:
name_count.append((product_name[0], val))
i += 1
name_count1 = pd.DataFrame([])
name_count1['products'] = [name[0] for name in name_count]
name_count1['percentage_of_access'] = [name[1] for name in name_count]
def dict_val(x):
return x[1]
sorted_product_name_dictionary2 = sorted(product_name_dictionary2.items(), key=dict_val, reverse=True)
i = 1
name_count = []
for key, val in sorted_product_name_dictionary2[:11]:
product_name = product_df[product_df['LP ID'] == key]['Product Name'].values
if len(product_name) > 0:
name_count.append((product_name[0], val))
i += 1
name_count1 = pd.DataFrame([])
name_count1['products'] = [name[0] for name in name_count]
name_count1['pageload_per1000_student'] = [name[1] for name in name_count]
plt.figure(figsize=(20, 10))
plt.bar(name_count1['products'].values, name_count1['pageload_per1000_student'].values, color=['orange', 'blue'])
plt.xlabel('Top Product name')
plt.ylabel('Sum of district based Page Load Count Per 1000 Student')
plt.show() | code |
73096770/cell_14 | [
"image_output_1.png"
] | import os
import pandas as pd
product_name_dictionary = {}
product_name_dictionary2 = {}
for dirname, _, filenames in os.walk('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/engagement_data/'):
for filename in filenames:
engagement_data_path = os.path.join(dirname, filename)
df_temp = pd.read_csv(engagement_data_path, dtype={'lp_id': str})
df_temp = df_temp.fillna(0)
df_temp = df_temp.groupby(by='lp_id', as_index=False).sum()
for index, row in df_temp.iterrows():
if row['lp_id'] in product_name_dictionary.keys():
product_name_dictionary[row['lp_id']] += row['pct_access']
product_name_dictionary2[row['lp_id']] += row['engagement_index']
else:
product_name_dictionary[row['lp_id']] = row['pct_access']
product_name_dictionary2[row['lp_id']] = row['engagement_index']
def dict_val(x):
return x[1]
sorted_product_name_dictionary = sorted(product_name_dictionary.items(), key=dict_val, reverse=True)
product_df = pd.read_csv('/kaggle/input/learnplatform-covid19-impact-on-digital-learning/products_info.csv', dtype={'LP ID': str})
i = 1
name_count = []
for key, val in sorted_product_name_dictionary[:11]:
product_name = product_df[product_df['LP ID'] == key]['Product Name'].values
if len(product_name) > 0:
name_count.append((product_name[0], val))
i += 1
def dict_val(x):
return x[1]
sorted_product_name_dictionary2 = sorted(product_name_dictionary2.items(), key=dict_val, reverse=True)
i = 1
name_count = []
for key, val in sorted_product_name_dictionary2[:11]:
product_name = product_df[product_df['LP ID'] == key]['Product Name'].values
if len(product_name) > 0:
name_count.append((product_name[0], val))
print(str(i) + ' :' + product_name[0])
i += 1 | code |
105197461/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
test_df.describe() | code |
105197461/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test
merged_dataset_null_counts = df_train.merge(df_test, how='outer').dropna()
merged_dataset_null_counts | code |
105197461/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.head(n=10) | code |
105197461/cell_34 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test
object_columns = list(train_df.select_dtypes(include=['object']).columns)
numeric_columns = [item for item in train_df.columns if item not in object_columns + ['failure']]
(train_df[object_columns].dtypes, test_df[numeric_columns].dtypes) | code |
105197461/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test | code |
105197461/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
print(f'Training data percentage: {train_df.shape[0] / (train_df.shape[0] + test_df.shape[0])}')
print(f'Test data percentage: {test_df.shape[0] / (train_df.shape[0] + test_df.shape[0])}') | code |
105197461/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test
merged_dataset_null_counts = df_train.merge(df_test, how='outer').dropna()
merged_dataset_null_counts
df_train_temp = df_train.copy().rename(columns={'NullCountTrain': 'NullCount'})
df_train_temp['dataset'] = 'train'
df_train_temp = df_train_temp[df_train_temp['FeatureName'] != 'failure']
df_test_temp = df_test.copy().rename(columns={'NullCountTest': 'NullCount'})
df_test_temp['dataset'] = 'test'
df_temp = pd.concat([df_train_temp, df_test_temp], axis=0)
import seaborn as sns
sns.set(rc={'figure.figsize': (20, 4)})
sns.set_theme(style='whitegrid')
ax = sns.barplot(x='FeatureName', y='NullCount', hue='dataset', data=df_temp[df_temp['NullCount'] != 0])
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right') | code |
105197461/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test
merged_dataset_null_counts = df_train.merge(df_test, how='outer').dropna()
merged_dataset_null_counts
df_train_temp = df_train.copy().rename(columns={'NullCountTrain': 'NullCount'})
df_train_temp['dataset'] = 'train'
df_train_temp = df_train_temp[df_train_temp['FeatureName'] != 'failure']
df_test_temp = df_test.copy().rename(columns={'NullCountTest': 'NullCount'})
df_test_temp['dataset'] = 'test'
df_temp = pd.concat([df_train_temp, df_test_temp], axis=0)
object_columns = list(train_df.select_dtypes(include=['object']).columns)
numeric_columns = [item for item in train_df.columns if item not in object_columns + ['failure']]
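# NOTE: train_df.notnull().columns is simply train_df.columns, so this keeps every column except the label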
columns_with_not_null_values = [col for col in train_df.notnull().columns if col != 'failure']
df_tmp = pd.DataFrame(train_df.isna().any(), columns=['isnan'])
NaN_columns = list(df_tmp[df_tmp['isnan'] == True].T.columns)
del df_tmp
label_column = 'failure'
def null_value_handling(df, columns, train=True, mean_values=None):
# impute NaNs with column means; training returns the means so the same values can be reused on test without leakage
if train:
mean_values = df[columns].mean()
return (df[columns].fillna(mean_values), mean_values)
elif not train and mean_values is not None:
return (df[columns].fillna(mean_values), None)
else:
raise ValueError('Inputs are not correct!')
df_tmp = pd.DataFrame(train_df.isna().any(), columns=['isnan'])
train_df[NaN_columns], mean_values = null_value_handling(df=train_df, columns=NaN_columns, train=True)
test_df[NaN_columns], _ = null_value_handling(df=test_df, columns=NaN_columns, train=False, mean_values=mean_values) | code |
105197461/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes | code |
105197461/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105197461/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.describe() | code |
105197461/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
print(f'Train shape: {train_df.shape}')
print(f'Test shape: {test_df.shape}') | code |
105197461/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
test_df.dtypes
test_df.columns | code |
105197461/cell_43 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test
merged_dataset_null_counts = df_train.merge(df_test, how='outer').dropna()
merged_dataset_null_counts
df_train_temp = df_train.copy().rename(columns={'NullCountTrain': 'NullCount'})
df_train_temp['dataset'] = 'train'
df_train_temp = df_train_temp[df_train_temp['FeatureName'] != 'failure']
df_test_temp = df_test.copy().rename(columns={'NullCountTest': 'NullCount'})
df_test_temp['dataset'] = 'test'
df_temp = pd.concat([df_train_temp, df_test_temp], axis=0)
object_columns = list(train_df.select_dtypes(include=['object']).columns)
numeric_columns = [item for item in train_df.columns if item not in object_columns + ['failure']]
columns_with_not_null_values = [col for col in train_df.notnull().columns if col != 'failure']
df_tmp = pd.DataFrame(train_df.isna().any(), columns=['isnan'])
NaN_columns = list(df_tmp[df_tmp['isnan'] == True].T.columns)
del df_tmp
label_column = 'failure'
def null_value_handling(df, columns, train=True, mean_values=None):
# impute NaNs with column means; training returns the means so the same values can be reused on test without leakage
if train:
mean_values = df[columns].mean()
return (df[columns].fillna(mean_values), mean_values)
elif not train and mean_values is not None:
return (df[columns].fillna(mean_values), None)
else:
raise ValueError('Inputs are not correct!')
df_tmp = pd.DataFrame(train_df.isna().any(), columns=['isnan'])
df_tmp = pd.DataFrame(train_df.isna().any(), columns=['isnan'])
NaN_columns = list(df_tmp[df_tmp['isnan'] == True].T.columns)
del df_tmp
print(f'Train - Colums with NaN values -> {NaN_columns}')
df_tmp = pd.DataFrame(test_df.isna().any(), columns=['isnan'])
NaN_columns = list(df_tmp[df_tmp['isnan'] == True].T.columns)
del df_tmp
print(f'Test - Colums with NaN values -> {NaN_columns}') | code |
105197461/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
train_df.columns | code |
105197461/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train | code |
105197461/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
train_df.dtypes
test_df.dtypes
train_df.columns
test_df.columns
train_null_count = train_df.isna().sum().to_dict()
df_train = pd.DataFrame([train_null_count.keys(), train_null_count.values()]).T
df_train.rename(columns={0: 'FeatureName', 1: 'NullCountTrain'}, inplace=True)
df_train
test_null_count = test_df.isna().sum().to_dict()
df_test = pd.DataFrame([test_null_count.keys(), test_null_count.values()]).T
df_test.rename(columns={0: 'FeatureName', 1: 'NullCountTest'}, inplace=True)
df_test
merged_dataset_null_counts = df_train.merge(df_test, how='outer').dropna()
merged_dataset_null_counts
df_train_temp = df_train.copy().rename(columns={'NullCountTrain': 'NullCount'})
df_train_temp['dataset'] = 'train'
df_train_temp = df_train_temp[df_train_temp['FeatureName'] != 'failure']
df_test_temp = df_test.copy().rename(columns={'NullCountTest': 'NullCount'})
df_test_temp['dataset'] = 'test'
df_temp = pd.concat([df_train_temp, df_test_temp], axis=0)
object_columns = list(train_df.select_dtypes(include=['object']).columns)
numeric_columns = [item for item in train_df.columns if item not in object_columns + ['failure']]
columns_with_not_null_values = [col for col in train_df.notnull().columns if col != 'failure']
df_tmp = pd.DataFrame(train_df.isna().any(), columns=['isnan'])
NaN_columns = list(df_tmp[df_tmp['isnan'] == True].T.columns)
del df_tmp
label_column = 'failure'
print(f'Colums -> {columns_with_not_null_values}\nLabel Column -> {label_column}\n NaN Columns -> {NaN_columns}') | code |
105197461/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-aug-2022/sample_submission.csv')
test_df.dtypes | code |
105204825/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_comp = pd.read_csv('../input/big-data-derby-2022/nyra_2019_complete.csv')
df_race = pd.read_csv('../input/big-data-derby-2022/nyra_race_table.csv')
df_stsrt = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
df_track = pd.read_csv('../input/big-data-derby-2022/nyra_tracking_table.csv')
df_race.head(2) | code |
105204825/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | # NOTE: 'df_tracj' is a typo for df_track, which is why this cell produced a stderr (NameError) output
df_tracj.head(2) | code |
105204825/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_comp = pd.read_csv('../input/big-data-derby-2022/nyra_2019_complete.csv')
df_race = pd.read_csv('../input/big-data-derby-2022/nyra_race_table.csv')
df_stsrt = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
df_track = pd.read_csv('../input/big-data-derby-2022/nyra_tracking_table.csv') | code |
105204825/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105204825/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_comp = pd.read_csv('../input/big-data-derby-2022/nyra_2019_complete.csv')
df_race = pd.read_csv('../input/big-data-derby-2022/nyra_race_table.csv')
df_stsrt = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
df_track = pd.read_csv('../input/big-data-derby-2022/nyra_tracking_table.csv')
df_comp.head(2) | code |
105204825/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_comp = pd.read_csv('../input/big-data-derby-2022/nyra_2019_complete.csv')
df_race = pd.read_csv('../input/big-data-derby-2022/nyra_race_table.csv')
df_stsrt = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
df_track = pd.read_csv('../input/big-data-derby-2022/nyra_tracking_table.csv')
df_stsrt.head(2) | code |
1005892/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train_df.info() | code |
1005892/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import SGDClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
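# One-hot encode the categorical columns; Age, Parch and Fare stay numeric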
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
from sklearn.linear_model import SGDClassifier
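# loss='log' makes SGDClassifier fit a logistic-regression model via SGD, so predict_proba is available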
clf = SGDClassifier(loss='log')
clf.fit(train_x, train_y)
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
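# Walk the train columns in order; any dummy column missing from test is inserted as all zeros so both frames share the same column layout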
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns)
train_x.columns
test_pred = clf.predict_proba(test_xx)
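# Pair each feature with its learned coefficient and sort descending to see the strongest positive predictors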
feat_coef = list(zip(train_x.columns, clf.coef_[0]))
feat_coef.sort(key=lambda x: -x[1])
train_x.columns[6] | code |
1005892/cell_20 | [
"text_html_output_1.png"
] | from sklearn.linear_model import SGDClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='log')
clf.fit(train_x, train_y)
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns)
test_pred = clf.predict_proba(test_xx)
test_pred | code |
1005892/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1005892/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import SGDClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='log')
clf.fit(train_x, train_y) | code |
1005892/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns)
test_xx | code |
1005892/cell_18 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import SGDClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='log')
clf.fit(train_x, train_y)
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns)
test_pred = clf.predict_proba(test_xx) | code |
1005892/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age'])) | code |
1005892/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns) | code |
1005892/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns)
train_x.columns | code |
1005892/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train_df[['Sex', 'Survived']].groupby(['Sex', 'Survived']).size() | code |
1005892/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import SGDClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
model_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
train_x = pd.get_dummies(train_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
train_y = train_df['Survived']
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='log')
clf.fit(train_x, train_y)
test_df = pd.read_csv('../input/test.csv')
test_x = pd.get_dummies(test_df[model_cols], columns=['Pclass', 'Sex', 'SibSp', 'Cabin', 'Embarked'])
test_x['Age'].fillna(train_x['Age'].median(), inplace=True)
test_x['Fare'].fillna(train_x['Fare'].median(), inplace=True)
train_cols = train_x.columns
test_cols_all = test_x.columns
test_cols = [x for x in train_cols if x in test_cols_all]
test_xx = test_x[test_cols]
lc = len(train_x.columns)
j = 0
for i in range(lc):
if train_cols[i] == test_cols[j]:
j += 1
continue
else:
test_xx.insert(i, train_cols[i], 0)
test_xx.columns[1:].append(pd.Index(['Age']))
def cmp(a, b):
return (a > b) - (a < b)
cmp(test_xx.columns, train_x.columns)
train_x.columns
test_pred = clf.predict_proba(test_xx)
feat_coef = list(zip(train_x.columns, clf.coef_[0]))
feat_coef.sort(key=lambda x: -x[1])
feat_coef | code |
106202249/cell_1 | [
"text_plain_output_1.png"
] | !pip install -q timm
!pip install -q einops | code |
129021628/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
for col in df1.columns:
i = i + 1
plt.tight_layout()
df1 = Data.drop(['Country'], axis=1)
df1.head() | code |
129021628/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
is_nan = df.isna().sum().to_frame(name='Count of nan')
is_nan | code |
129021628/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df | code |
129021628/cell_25 | [
"text_html_output_10.png",
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_html_output_9.png",
"text_html_output_8.png",
"text_html_output_3.png",
"text_html_output_7.png"
] | pip install kneed | code |
129021628/cell_20 | [
"image_output_1.png"
] | from termcolor import colored
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
is_nan = df.isna().sum().to_frame(name='Count of nan')
is_nan
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
for col in df1.columns:
i = i + 1
for i in df.drop('Country', axis=1).columns:
fig = px.choropleth(df, locationmode='country names', locations='Country', title=i + ' per Country in the World', color=i, color_continuous_scale='Greens')
fig.update_geos(fitbounds='locations', visible=True)
for i in df.drop('Country', axis=1).columns:
fig = px.choropleth(df, locationmode='country names', locations='Country', color=i, title=i + ' per country in Africa continent', scope='africa', color_continuous_scale='YlOrBr')
fig.update_geos(fitbounds='locations', visible=True)
fig.show(engine='kaleido') | code |
129021628/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from termcolor import colored
import plotly.express as px
import kaleido
from sklearn.preprocessing import StandardScaler
import matplotlib.image as mpimg
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score | code |
129021628/cell_26 | [
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_html_output_9.png",
"text_html_output_1.png",
"text_html_output_8.png",
"text_html_output_3.png",
"text_html_output_7.png"
] | from kneed import KneeLocator
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
for col in df1.columns:
i = i + 1
plt.tight_layout()
df1 = Data.drop(['Country'], axis=1)
from sklearn.preprocessing import StandardScaler
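# Standardize features to zero mean and unit variance so KMeans distances are not dominated by large-scale features such as income or gdpp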
scaler = StandardScaler()
scaled_features = scaler.fit_transform(df1)
from sklearn.cluster import KMeans
List = []
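# Record inertia (within-cluster sum of squared distances) for k = 1..10 to build the elbow curve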
for k in range(1, 11):
kmeans = KMeans(n_clusters=k, init='random', max_iter=300, random_state=1, n_init=10)
kmeans.fit(scaled_features)
List.append(kmeans.inertia_)
from kneed import KneeLocator
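# KneeLocator picks the elbow automatically: the k where the convex, decreasing inertia curve flattens out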
kl = KneeLocator(range(1, 11), List, curve='convex', direction='decreasing')
kl.elbow
plt.style.use('fivethirtyeight')
plt.plot(range(1, 11), List)
plt.xticks(range(1, 11))
plt.xlabel('Number of Clusters', labelpad=20)
plt.ylabel('Inertia')
plt.axvline(x=kl.elbow, color='b', label='axvline - full height', ls='--')
plt.show() | code |
129021628/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
df.info() | code |
129021628/cell_19 | [
"image_output_1.png"
] | from termcolor import colored
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
is_nan = df.isna().sum().to_frame(name='Count of nan')
is_nan
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
for col in df1.columns:
i = i + 1
for i in df.drop('Country', axis=1).columns:
fig = px.choropleth(df, locationmode='country names', locations='Country', title=i + ' per Country in the World', color=i, color_continuous_scale='Greens')
fig.update_geos(fitbounds='locations', visible=True)
fig.show(engine='kaleido') | code |
129021628/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129021628/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
for col in df1.columns:
i = i + 1
plt.figure(figsize=(10, 5), dpi=100)
sns.heatmap(df1.corr(), annot=True, cmap='YlOrBr')
plt.suptitle('Correlation Between Features')
plt.tight_layout()
plt.show() | code |
129021628/cell_15 | [
"text_html_output_1.png"
] | from termcolor import colored
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
is_nan = df.isna().sum().to_frame(name='Count of nan')
is_nan
print(colored(f'Number of duplicated rows: {df.duplicated().sum()}')) | code |
129021628/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
ax = plt.subplot(5, 2, i + 1)
sns.boxplot(x=columns[i], data=df1, color='magenta')
plt.title(df1.columns[i])
plt.tight_layout()
plt.show() | code |
129021628/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
plt.figure(figsize=(20, 40))
for col in df1.columns:
plt.subplot(5, 2, i)
sns.distplot(df1[col], hist=True, hist_kws={'edgecolor': 'w', 'linewidth': 3}, kde_kws={'linewidth': 3})
sns.rugplot(df1[col], height=0.1, clip_on=False, color='red')
i = i + 1
plt.show() | code |
129021628/cell_14 | [
"text_plain_output_1.png"
] | from termcolor import colored
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
is_nan = df.isna().sum().to_frame(name='Count of nan')
is_nan
print(colored(f'Number of duplicated rows: {df.duplicated().sum()}')) | code |
129021628/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T | code |
129021628/cell_27 | [
"text_html_output_1.png"
] | from kneed import KneeLocator
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
plt.style.use('seaborn')
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
# Noise detection
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
columns = ['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp']
plt.figure(figsize=(15, 20))
for i, col in enumerate(df1.columns):
    ax = plt.subplot(5, 2, i + 1)
    sns.boxplot(x=columns[i], data=df1, color='magenta')
    plt.title(df1.columns[i])
plt.tight_layout()
plt.show()
df1 = pd.DataFrame(Data, columns=['child_mort', 'exports', 'health', 'imports', 'income', 'inflation', 'life_expec', 'total_fer', 'gdpp'])
i = 1
for col in df1.columns:
i = i + 1
plt.tight_layout()
df1 = Data.drop(['Country'], axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_features = scaler.fit_transform(df1)
from sklearn.cluster import KMeans
List = []
for k in range(1, 11):
kmeans = KMeans(n_clusters=k, init='random', max_iter=300, random_state=1, n_init=10)
kmeans.fit(scaled_features)
List.append(kmeans.inertia_)
from kneed import KneeLocator
kl = KneeLocator(range(1, 11), List, curve='convex', direction='decreasing')
kl.elbow
plt.style.use('fivethirtyeight')
plt.xticks(range(1, 11))
from sklearn.metrics import silhouette_score
silhouette_coefficients = []
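# Silhouette score ranges from -1 to 1; values near 1 mean compact, well-separated clusters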
for k in range(2, 11):
kmeans = KMeans(n_clusters=k, init='random', random_state=1)
kmeans.fit(scaled_features)
score = silhouette_score(scaled_features, kmeans.labels_)
silhouette_coefficients.append(score)
plt.style.use('fivethirtyeight')
plt.plot(range(2, 11), silhouette_coefficients)
plt.xticks(range(2, 11))
plt.xlabel('Number of Clusters')
plt.ylabel('silhouette coefficients')
plt.show()
print('max silhouette score:', max(silhouette_coefficients)) | code |
129021628/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Data = pd.read_csv('/kaggle/input/country-dataset/Country_Dataset.csv')
df = pd.DataFrame(Data)
df
df.describe().T
df.describe(include=['object']) | code |
129021628/cell_5 | [
"image_output_1.png"
] | pip install kaleido | code |
16111583/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
import time
import unittest
t = unittest.TestCase()
SPACE_DIMENSIONS = 2
class Points(np.ndarray):
"""ndarray sized (SPACE_DIMENSIONS,...) with named coordinates x,y"""
@staticmethod
def of(coords):
p = np.asarray(coords).view(Points)
assert p.shape[0] == SPACE_DIMENSIONS
return p
@property
def x(self):
return self[0]
@property
def y(self):
return self[1]
class Lines(np.ndarray):
"""ndarray shaped (3,...) with named line parameters a,b,c"""
@staticmethod
def of(abc):
lp = np.asarray(abc).view(Lines)
assert lp.shape[0] == 3
return lp
@property
def a(self):
return self[0]
@property
def b(self):
return self[1]
@property
def c(self):
return self[2]
def intersections(self, hyperplanes) -> Points:
"""
https://stackoverflow.com/a/20679579/2082707
answered Dec 19 '13 at 10:46 by rook
Adapted for numpy matrix operations by Subota
Intersection points of lines from the first set with hyperplanes from the second set.
        Currently only 2D space is supported, i.e. the second set consists of lines, too.
        @hyperplanes parametric equation coefficients. For 2D these are also Lines
@return array of intersection coordinates as Points, sized:
- SPACE_DIMENSIONS for intersection coordinates
- n1 for the number of lines passed in L1
- n2 for the number of lines passed in L2
"""
l1 = np.reshape(self, (*self.shape, 1))
l2 = hyperplanes
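        # Cramer's rule for each pair of lines a1*x + b1*y = c1, a2*x + b2*y = c2:
        # x = (c1*b2 - b1*c2) / (a1*b2 - b1*a2), y = (a1*c2 - c1*a2) / (a1*b2 - b1*a2)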
d = l1.a * l2.b - l1.b * l2.a
dx = l1.c * l2.b - l1.b * l2.c
dy = l1.a * l2.c - l1.c * l2.a
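        # A zero determinant means the lines are parallel (or coincident); mark those intersections as undefined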
d[d == 0.0] = np.nan
x = dx / d
y = dy / d
return Points.of((x, y))
class LineSegments(np.ndarray):
"""Wrapper around ndarray((2,SPACE_DIMENSIONS)) to access endPoint1, endPoint2 and coordinates x,y by names"""
@staticmethod
def of(point_coords):
ls = np.asarray(point_coords).view(LineSegments)
assert ls.shape[0] == 2
assert ls.shape[1] == SPACE_DIMENSIONS
return ls
@property
def endPoint1(self):
return Points.of(self[0])
@property
def endPoint2(self):
return Points.of(self[1])
@property
def x(self):
return self[:, 0]
@property
def y(self):
return self[:, 1]
def length(self) -> np.array:
dif = self.endPoint1 - self.endPoint2
return np.sqrt(dif.x * dif.x + dif.y * dif.y).view(np.ndarray)
def lines(self) -> Lines:
"""
https://stackoverflow.com/a/20679579/2082707
answered Dec 19 '13 at 10:46 by rook
Adapted for numpy matrix operations by Subota
        Calculates the line equation Ax + By - C = 0, given two points on the line.
        Horizontal and vertical lines are OK
@return returns an array of Lines parameters sized:
- 3 for the parameters A, B, and C
- n for the number of lines calculated
"""
p1, p2 = (self.endPoint1, self.endPoint2)
a = p1.y - p2.y
b = p2.x - p1.x
c = -(p1.x * p2.y - p2.x * p1.y)
return Lines.of((a, b, c))
def intersections(self, other) -> Points:
"""
        Returns intersection points for two segment sets,
        along with a boolean matrix indicating whether each intersection lies within both segments.
@other LineSegments to find intersections with. Sized:
- 2 for the endPoint1 and endPoint2
- SPACE_DIMENSIONS
        - n2 for the number of segments in the second set
Generally speaking these must be hyper-planes in N-dimensional space
@return a tuple with two elements
        0. boolean matrix sized (n1, n2): True where the intersection falls within both segments, False otherwise.
1. intersection Points sized (SPACE_DIMENSIONS, n1, n2)
"""
s1, s2 = (self, other)
l1, l2 = (self.lines(), other.lines())
il = l1.intersections(l2)
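        # Add a trailing axis so each segment in self broadcasts against every segment in other, yielding (n1, n2) matrices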
s1 = s1.reshape((2, SPACE_DIMENSIONS, -1, 1))
s1p1, s1p2 = (s1.endPoint1, s1.endPoint2)
s2p1, s2p2 = (s2.endPoint1, s2.endPoint2)
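        # Small tolerance absorbs floating-point error when testing whether an intersection lies inside both segments' bounding boxes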
ROUNDING_THRESHOLD = np.array(1e-10)
which_intersect = (il.x <= np.maximum(s1p1.x, s1p2.x) + ROUNDING_THRESHOLD) & (il.x >= np.minimum(s1p1.x, s1p2.x) - ROUNDING_THRESHOLD) & (il.y <= np.maximum(s1p1.y, s1p2.y) + ROUNDING_THRESHOLD) & (il.y >= np.minimum(s1p1.y, s1p2.y) - ROUNDING_THRESHOLD) & (il.x <= np.maximum(s2p1.x, s2p2.x) + ROUNDING_THRESHOLD) & (il.x >= np.minimum(s2p1.x, s2p2.x) - ROUNDING_THRESHOLD) & (il.y <= np.maximum(s2p1.y, s2p2.y) + ROUNDING_THRESHOLD) & (il.y >= np.minimum(s2p1.y, s2p2.y) - ROUNDING_THRESHOLD)
return (which_intersect, il)
t.assertTrue(np.allclose(LineSegments.of([[[-1.0], [-1]], [[1], [1]]]).lines().flat, np.array([-2, 2, 0])))
t.assertTrue(np.allclose(LineSegments.of([[[0.0], [-1]], [[0], [1]]]).lines().flat, np.array([-2, 0, 0])))
t.assertTrue(np.allclose(LineSegments.of([[[3.0], [1]], [[-4], [1]]]).lines().flat, np.array([0, -7, -7])))
t.assertEqual(LineSegments.of([Points.of([0, 0]), Points.of([3, 4])]).length(), 5)
def demo_intersect_lines():
seg1 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 2), random_state=19))
seg2 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 3), random_state=15) + 1)
l1, l2 = (seg1.lines(), seg2.lines())
i = l1.intersections(l2)
plt.plot(seg1.x, seg1.y, '-', c='green')
plt.plot(seg2.x, seg2.y, '-', c='blue')
plt.plot(i.x, i.y, '+', c='red', markersize=20)
plt.title('Extended Line Intersections')
plt.axis('off')
def demo_intersect_segments():
seg1 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 4), random_state=1))
seg2 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 5), random_state=2))
plt.plot(seg1.x, seg1.y, '-', c='black')
plt.plot(seg2.x, seg2.y, '-', c='lightgrey')
w, i = seg1.intersections(seg2)
plt.plot(i.x[w], i.y[w], '+', c='red', markersize=20)
plt.title('Segment Intersections')
plt.axis('off')
f, ax = plt.subplots(ncols=2)
f.set_size_inches(12, 4)
plt.sca(ax[0])
demo_intersect_lines()
plt.sca(ax[1])
demo_intersect_segments() | code |