path | screenshot_names | code | cell_type |
---|---|---|---|
90108947/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pylab as plt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
data_plot = pd.read_csv(data, sep=',', parse_dates=['Date'], index_col='Date')
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
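# Sliding 60-step windows: each X_train row holds the previous 60 scaled closes, Y_train the close that follows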
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
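# Stacked LSTMs: the first returns full sequences for the second; two Dense layers then map to a single predicted close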
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1)
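# Test windows start 60 rows before the train/test split so the first test sample has a complete 60-day lookback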
test_data = scaled_data[training_data_len - 60:, :]
X_test = []
Y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
X_test.append(test_data[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predictions = model.predict(X_test)
predictions = scaler.inverse_transform(predictions)
rmse = np.sqrt(np.mean((predictions - Y_test) ** 2))
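# One-step-ahead forecast: scale the most recent 60 closes with the already-fitted scaler and predict the next close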
data_2 = df
new_df_2 = data_2.filter(['Close'])
last_60_days = new_df_2[-60:].values
last_60_days_scaled = scaler.transform(last_60_days)
X_test_2 = []
X_test_2.append(last_60_days_scaled)
X_test_2 = np.array(X_test_2)
X_test_2 = np.reshape(X_test_2, (X_test_2.shape[0], X_test_2.shape[1], 1))
final_y_predict = model.predict(X_test_2)
final_y_predict = scaler.inverse_transform(final_y_predict)
sample_sub = pd.read_csv('/kaggle/input/110-1-ntut-dl-app-hw3/nasdaq_predict.csv')
sample_sub['Expected'] = final_y_predict
sample_sub.to_csv('submission1.csv', index=False)
sample_sub.head() | code |
90108947/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.info() | code |
90108947/cell_23 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1)
test_data = scaled_data[training_data_len - 60:, :]
X_test = []
Y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
X_test.append(test_data[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predictions = model.predict(X_test)
predictions = scaler.inverse_transform(predictions)
rmse = np.sqrt(np.mean((predictions - Y_test) ** 2))
data_2 = df
new_df_2 = data_2.filter(['Close'])
last_60_days = new_df_2[-60:].values
last_60_days_scaled = scaler.transform(last_60_days)
X_test_2 = []
X_test_2.append(last_60_days_scaled)
X_test_2 = np.array(X_test_2)
X_test_2 = np.reshape(X_test_2, (X_test_2.shape[0], X_test_2.shape[1], 1))
final_y_predict = model.predict(X_test_2)
final_y_predict = scaler.inverse_transform(final_y_predict)
print(final_y_predict) | code |
90108947/cell_20 | [
"image_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1)
test_data = scaled_data[training_data_len - 60:, :]
X_test = []
Y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
X_test.append(test_data[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predictions = model.predict(X_test)
predictions = scaler.inverse_transform(predictions)
rmse = np.sqrt(np.mean((predictions - Y_test) ** 2))
rmse | code |
90108947/cell_6 | [
"text_html_output_1.png"
] | import matplotlib.pylab as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
data_plot = pd.read_csv(data, sep=',', parse_dates=['Date'], index_col='Date')
plt.figure(figsize=(16, 8))
plt.plot(data_plot['Close'])
plt.xlabel('Dates', fontsize=18)
plt.ylabel('Closing Prices', fontsize=18)
plt.show() | code |
90108947/cell_11 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
print(X_train.shape)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape | code |
90108947/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
90108947/cell_7 | [
"text_plain_output_1.png"
] | import math
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len | code |
90108947/cell_8 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import math
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data | code |
90108947/cell_3 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df | code |
90108947/cell_24 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pylab as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
data_plot = pd.read_csv(data, sep=',', parse_dates=['Date'], index_col='Date')
sample_sub = pd.read_csv('/kaggle/input/110-1-ntut-dl-app-hw3/nasdaq_predict.csv')
sample_sub.head() | code |
90108947/cell_14 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1) | code |
90108947/cell_22 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import matplotlib.pylab as plt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
data_plot = pd.read_csv(data, sep=',', parse_dates=['Date'], index_col='Date')
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=1, epochs=1)
test_data = scaled_data[training_data_len - 60:, :]
X_test = []
Y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
X_test.append(test_data[i - 60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predictions = model.predict(X_test)
predictions = scaler.inverse_transform(predictions)
train = df[:training_data_len]
valid = df[training_data_len:].copy()
valid['Predictions'] = predictions
valid | code |
90108947/cell_12 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape
new_df = df.filter(['Close'])
dataset = new_df.values
training_data_len = math.ceil(len(dataset) * 0.8)
training_data_len
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
train_data = scaled_data[0:training_data_len, :]
X_train = []
Y_train = []
for i in range(60, len(train_data)):
X_train.append(train_data[i - 60:i, 0])
Y_train.append(train_data[i, 0])
X_train, Y_train = (np.array(X_train), np.array(Y_train))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1)) | code |
90108947/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '/kaggle/input/110-1-ntut-dl-app-hw3/IXIC.csv'
df = pd.read_csv(data)
df
df.shape | code |
17115461/cell_6 | [
"text_plain_output_1.png"
] | import os
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
print(os.listdir(test_dir)) | code |
17115461/cell_8 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_width, image_height = (150, 150)
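# Augment training images on the fly (rotations, shifts, shear, zoom, flips); test images are only rescaled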
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=128, class_mode='categorical', target_size=(image_width, image_height))
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=128, class_mode='categorical', target_size=(image_width, image_height)) | code |
17115461/cell_15 | [
"text_plain_output_1.png"
] | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Dropout, Flatten, Dense, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import tensorflow as tf
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_width, image_height = (150, 150)
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=128, class_mode='categorical', target_size=(image_width, image_height))
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=128, class_mode='categorical', target_size=(image_width, image_height))
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
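# Freeze every layer of the VGG16 convolutional base so only the new classifier head is trained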
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Flatten, Dense, BatchNormalization
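# Classifier head on the frozen base: Flatten, Dense(512) and Dense(64) with BatchNorm and Dropout, then a 6-way softmax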
model_using_vgg16 = tf.keras.models.Sequential([vgg16, Flatten(), Dense(512, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(64, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(6, activation='softmax')])
model_using_vgg16.summary()
model_using_vgg16.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model_using_vgg16.fit_generator(train_generator, validation_data=test_generator, epochs=10) | code |
17115461/cell_3 | [
"text_plain_output_1.png"
] | import os
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
list_files('../input') | code |
17115461/cell_17 | [
"text_plain_output_1.png"
] | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Dropout, Flatten, Dense, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import os
import tensorflow as tf
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
test_dir = '../input/seg_test/seg_test/'
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_width, image_height = (150, 150)
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=128, class_mode='categorical', target_size=(image_width, image_height))
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=128, class_mode='categorical', target_size=(image_width, image_height))
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Flatten, Dense, BatchNormalization
model_using_vgg16 = tf.keras.models.Sequential([vgg16, Flatten(), Dense(512, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(64, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(6, activation='softmax')])
model_using_vgg16.summary()
model_using_vgg16.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history = model_using_vgg16.fit_generator(train_generator, validation_data=test_generator, epochs=10)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss') | code |
17115461/cell_10 | [
"text_plain_output_1.png"
] | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary() | code |
17115461/cell_12 | [
"text_plain_output_1.png"
] | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Dropout, Flatten, Dense, BatchNormalization
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
vgg16 = VGG16(input_shape=(image_width, image_height, 3), include_top=False, weights='imagenet')
for layer in vgg16.layers:
layer.trainable = False
vgg16.summary()
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Flatten, Dense, BatchNormalization
model_using_vgg16 = tf.keras.models.Sequential([vgg16, Flatten(), Dense(512, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(64, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(6, activation='softmax')])
model_using_vgg16.summary() | code |
17115461/cell_5 | [
"text_plain_output_1.png"
] | import os
import os
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
subindent = ' ' * 4 * (level + 1)
train_dir = '../input/seg_train/seg_train/'
print(os.listdir(train_dir)) | code |
330287/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
plus = sum(df_train.loc[:, 'outcome'] == 0)
minus = sum(df_train.loc[:, 'outcome'] == 1)
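# data_cleanser shifts date_x back by the span of the training dates plus the offset between the latest date and today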
def data_cleanser(data, is_train):
def adjust_dates(dates, diff):
return dates - diff
if is_train:
df_dates = data['date_x']
diff = df_dates.max() - df_dates.min()
diff2 = df_dates.max() - pd.Timestamp.now().normalize()
diffdays = diff + diff2
data['adj_date'] = adjust_dates(data['date_x'], diffdays)
return data.drop(['date_x'], axis=1)
data_cleanser(df_train, True).head() | code |
330287/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
df_train.head(2)
plus = sum(df_train.loc[:, 'outcome'] == 0)
minus = sum(df_train.loc[:, 'outcome'] == 1)
print(plus, minus)
print(df_train['outcome'].unique()) | code |
330287/cell_7 | [
"image_output_1.png"
] | import brewer2mpl
import matplotlib.pyplot as plt
set2 = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors
font = {'family': 'sans-serif', 'color': 'teal', 'weight': 'bold', 'size': 18}
plt.rc('font', family='serif')
plt.rc('font', size=16)
plt.rc('font', weight='bold')
plt.style.use('seaborn-dark-palette')
print(plt.style.available)
fig_size = plt.rcParams['figure.figsize']
fig_size[0] = 6
fig_size[1] = 6
plt.rcParams['figure.figsize'] = fig_size | code |
330287/cell_8 | [
"text_html_output_1.png"
] | from matplotlib import rcParams
import brewer2mpl
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
plus = sum(df_train.loc[:, 'outcome'] == 0)
minus = sum(df_train.loc[:, 'outcome'] == 1)
set2 = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors
font = {'family': 'sans-serif', 'color': 'teal', 'weight': 'bold', 'size': 18}
plt.rc('font', family='serif')
plt.rc('font', size=16)
plt.rc('font', weight='bold')
plt.style.use('seaborn-dark-palette')
fig_size = plt.rcParams['figure.figsize']
fig_size[0] = 6
fig_size[1] = 6
plt.rcParams['figure.figsize'] = fig_size
from matplotlib import rcParams
rcParams['font.size'] = 12
rcParams['text.color'] = 'black'
piechart = plt.pie((minus, plus), labels=('plus', 'minus'), shadow=False, colors=('teal', 'crimson'), explode=(0.08, 0.08), startangle=90, autopct='%1.1f%%')
plt.axis('equal')
plt.title('Animal Shelter Outcome Train Data', y=1.08, fontdict=font)
plt.tight_layout()
plt.savefig('TWP-Status-Groups-train.png', bbox_inches='tight') | code |
330287/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
for d in ['date_x', 'date_y']:
print('Start of ' + d + ': ' + str(df_train[d].min().date()))
print(' End of ' + d + ': ' + str(df_train[d].max().date()))
print('Range of ' + d + ': ' + str(df_train[d].max() - df_train[d].min()) + '\n') | code |
74049529/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74049529/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from datetime import datetime
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
date1str = str(input('Enter date(yyyy-mm-dd): '))
date1 = datetime.strptime(date1str, '%Y-%m-%d')
date1after = date1 + pd.Timedelta(days=1)
print('Date after ', date1, ' is ', date1after)
date1before = date1 - pd.Timedelta(days=1)
print('Date before ', date1, ' is ', date1before)
date2str = str(input('Enter date(yyyy-mm-dd): '))
date2 = datetime.strptime(date2str, '%Y-%m-%d')
print('Difference between ', date2, ' and ', date1, ' is ', date2 - date1) | code |
88093705/cell_23 | [
"text_plain_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.shape
data.info() | code |
88093705/cell_20 | [
"text_plain_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.shape | code |
88093705/cell_50 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_4.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_html_output_2.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_html_output_1.png",
"image_output_6.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"text_html_output_3.png"
] | from IPython.core.display import HTML
from IPython.display import Markdown
from scipy.stats import norm, skew, kurtosis
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.stats as stats
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.shape
"""Type des données de nos variables."""
bold('**Type des données de nos variables:**')
listeInt = ''
listeFloat = ''
listeObj = ''
for col in data.columns:
if data[col].dtype in ['float64']:
listeFloat += col + ','
if data[col].dtype in ['int64']:
listeInt += col + ','
if data[col].dtype in ['object']:
listeObj += col + ','
data1 = data.iloc[:, 2:]
categorical_indexes = [0, 1, 3, 4] + list(range(6, 20))
data1.iloc[:, categorical_indexes] = data1.iloc[:, categorical_indexes].astype('category')
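# graph_unitaire plots, for one numeric column, a KDE and a normal-fit distribution side by side and reports skewness/kurtosis; graph_duo adds a QQ-plot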
def graph_unitaire(data1,nom_colonne,proba):
f, (ax1, ax2) = plt.subplots(1,2,figsize=(20,8))
sns.kdeplot(data1[nom_colonne],ax = ax1,color ='blue',shade=True,
label=("Skewness : %.2f"%(data1[nom_colonne].skew()),
"Kurtosis: %.2f"%(data1[nom_colonne].kurtosis())))
ax1.set_xlabel(nom_colonne,color='black',fontsize=12)
ax1.set_title(nom_colonne + ' Kdeplot',fontsize=14)
ax1.axvline(data1[nom_colonne].mean() , color ='g',linestyle = '--')
ax1.legend(loc ='upper right',fontsize=12,ncol=2)
sns.distplot(data1[nom_colonne] , fit=norm,ax = ax2);
ax2.set_xlabel(nom_colonne,color='black',fontsize=12)
ax2.set_title(nom_colonne + ' distribution',fontsize=14)
ax2.axvline(data1[nom_colonne].mean() , color ='g',linestyle = '--')
(mu, sigma) = norm.fit(data1[nom_colonne])
ax2.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],loc ='upper right',fontsize=12,ncol=2)
sns.despine()
plt.show()
if proba==True:
graph_duo(data1,nom_colonne)
return(data1[nom_colonne].skew(),data1[nom_colonne].kurtosis())
def graph_duo(data1,nom_colonne):
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(data1[nom_colonne], plot=plt)
plt.show()
""" On parcours les différentes colonnes """
for col in data1.columns:
' Uniquement les colonnes numériques '
if data1[col].dtype in ['int64', 'float64']:
display(HTML('<strong>Analyse de la variable ' + col + '</strong>'))
skew1, kurto1 = graph_unitaire(data1, col, True) | code |
88093705/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88093705/cell_32 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.shape
"""Type des données de nos variables."""
bold('**Type des données de nos variables:**')
listeInt = ''
listeFloat = ''
listeObj = ''
for col in data.columns:
if data[col].dtype in ['float64']:
listeFloat += col + ','
if data[col].dtype in ['int64']:
listeInt += col + ','
if data[col].dtype in ['object']:
listeObj += col + ','
display('Type flottant : ' + listeFloat)
display('Type Int : ' + listeInt)
display('Type Objet : ' + listeObj) | code |
88093705/cell_15 | [
"text_html_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.tail() | code |
88093705/cell_38 | [
"text_plain_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.shape
"""Type des données de nos variables."""
bold('**Type des données de nos variables:**')
listeInt = ''
listeFloat = ''
listeObj = ''
for col in data.columns:
if data[col].dtype in ['float64']:
listeFloat += col + ','
if data[col].dtype in ['int64']:
listeInt += col + ','
if data[col].dtype in ['object']:
listeObj += col + ','
data1 = data.iloc[:, 2:]
categorical_indexes = [0, 1, 3, 4] + list(range(6, 20))
data1.iloc[:, categorical_indexes] = data1.iloc[:, categorical_indexes].astype('category')
data1.info() | code |
88093705/cell_46 | [
"text_html_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.shape
"""Type des données de nos variables."""
bold('**Type des données de nos variables:**')
listeInt = ''
listeFloat = ''
listeObj = ''
for col in data.columns:
if data[col].dtype in ['float64']:
listeFloat += col + ','
if data[col].dtype in ['int64']:
listeInt += col + ','
if data[col].dtype in ['object']:
listeObj += col + ','
data1 = data.iloc[:, 2:]
categorical_indexes = [0, 1, 3, 4] + list(range(6, 20))
data1.iloc[:, categorical_indexes] = data1.iloc[:, categorical_indexes].astype('category')
data1.describe().transpose() | code |
88093705/cell_14 | [
"text_html_output_1.png"
] | from IPython.display import Markdown
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
"""Personnalisation de la visualisation"""
plt.style.use('bmh')
sns.set_style({'axes.grid': False})
'On peut utiliser le gras , souligné etc avec markedown'
from IPython.display import Markdown
def bold(string):
pass
pd.options.display.max_rows = 150
df1 = pd.read_csv('../input/projet-data-mining/test.csv')
df2 = pd.read_csv('../input/projet-data-mining/train.csv')
data = pd.concat([df1, df2], axis=0, ignore_index=True)
data.head() | code |
326100/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True, index_col='dt', parse_dates=['dt'])
global_temperatures[global_temperatures.index.year > 2000]['LandAverageTemperature'].plot(figsize=(13, 7)) | code |
326100/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True, index_col='dt', parse_dates=['dt'])
global_temperatures.groupby(global_temperatures.index.year)['LandAverageTemperature'].mean().plot(figsize=(13, 7)) | code |
326100/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
from matplotlib import pyplot as plt
import seaborn as sbn | code |
326100/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True, index_col='dt', parse_dates=['dt'])
global_temperatures.groupby(global_temperatures.index.year)['LandAverageTemperatureUncertainty'].mean().plot(figsize=(13, 7)) | code |
326100/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True, index_col='dt', parse_dates=['dt'])
print(global_temperatures.info()) | code |
73074503/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape | code |
73074503/cell_25 | [
"image_output_11.png",
"image_output_14.png",
"image_output_13.png",
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_12.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
y.dtype
for col in features.select_dtypes('object'):
plt.figure()
x = features[col].value_counts()
features[col].value_counts().plot.pie(autopct=lambda x: str(round(x, 2)) + '%', pctdistance=2, labeldistance=1.4, shadow=True) | code |
73074503/cell_33 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | X_train.head() | code |
73074503/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
y.dtype
plt.figure(figsize=(20, 10))
plt.plot(y.values) | code |
73074503/cell_39 | [
"text_html_output_1.png"
] | from sklearn.ensemble import BaggingRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
from sklearn.ensemble import BaggingRegressor
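# Bagging ensemble: 10 XGBoost regressors, each fit on a bootstrap sample, with predictions averaged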
regr = BaggingRegressor(base_estimator=XGBRegressor(), n_estimators=10, random_state=0).fit(X_train, y_train)
preds_valid = regr.predict(X_valid)
print(mean_squared_error(y_valid, preds_valid, squared=False)) | code |
73074503/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
y.dtype | code |
73074503/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.head() | code |
73074503/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
plt.figure(figsize=(20, 10))
sns.heatmap(train.isna(), cbar=False) | code |
73074503/cell_38 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from xgboost import XGBRegressor
params_xgb = {'lambda': 0.7044156083795233, 'alpha': 9.681476940192473, 'colsample_bytree': 0.3, 'subsample': 0.8, 'learning_rate': 0.015, 'max_depth': 3, 'min_child_weight': 235, 'random_state': 48, 'n_estimators': 30000}
XGBRegressor_model = XGBRegressor(**params_xgb)
'XGBRegressor_model.fit(X_train, y_train, early_stopping_rounds=200, \n eval_set=[(X_valid, y_valid)], \n verbose=False)\npreds_valid = XGBRegressor_model.predict(X_valid)\nprint(mean_squared_error(y_valid, preds_valid, squared=False))' | code |
73074503/cell_43 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
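# Shared preprocessing: expand to degree-2 polynomial features, then keep the 10 features with the highest f_classif scores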
preprocessor = make_pipeline(PolynomialFeatures(2, include_bias=False), SelectKBest(f_classif, k=10))
LR_model = make_pipeline(preprocessor, LinearRegression())
RandomForest_model = make_pipeline(preprocessor, RandomForestRegressor())
DecisionTreeRegressor_model = make_pipeline(preprocessor, DecisionTreeRegressor())
from sklearn.ensemble import BaggingRegressor
regr = BaggingRegressor(base_estimator=XGBRegressor(), n_estimators=10, random_state=0).fit(X_train, y_train)
preds_valid = regr.predict(X_valid)
def evaluation(model):
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
# assumed reporting step, mirroring the earlier cells: print the validation RMSE
print(mean_squared_error(y_valid, preds_valid, squared=False))
list_models = {'LR_model': LR_model, 'RandomForest_model': RandomForest_model, 'DecisionTreeRegressor_model': DecisionTreeRegressor_model}
for name, model in list_models.items():
print(name)
evaluation(model) | code |
73074503/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
y.dtype
for col in features.select_dtypes('object'):
print(f'{col:-<50}{features[col].unique()}') | code |
73074503/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
y.dtype
for col in features.select_dtypes('float'):
plt.figure()
sns.histplot(features[col]) | code |
73074503/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts() | code |
73074503/cell_27 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
y.dtype
for col in features.select_dtypes('object'):
x = features[col].value_counts()
sns.clustermap(features.corr(), cbar=True, annot=True) | code |
73074503/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.dtypes.value_counts()
y = train['target']
features = train.drop(['target'], axis=1)
features.head() | code |
1010157/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.info() | code |
1010157/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df['PREVAILING_WAGE'].describe() | code |
1010157/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.isnull().sum()
df[['EMPLOYER_NAME', 'PREVAILING_WAGE']].groupby('EMPLOYER_NAME', as_index=False).mean().sort_values(by='PREVAILING_WAGE', ascending=False).head(20) | code |
1010157/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.describe(include=['O']) | code |
1010157/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.isnull().sum()
df.EMPLOYER_NAME.value_counts().head(20).plot(kind='bar') | code |
1010157/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.head() | code |
1010157/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.isnull().sum()
df.YEAR.value_counts().plot(kind='bar') | code |
1010157/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.isnull().sum()
df.WORKSITE.value_counts().head(20).plot(kind='bar') | code |
1010157/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.isnull().sum() | code |
1010157/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/h1b_kaggle.csv')
df.isnull().sum()
df.FULL_TIME_POSITION.value_counts().plot(kind='bar') | code |
128019012/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
plt.plot(year, price) | code |
128019012/cell_4 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
plt.plot(year, price) | code |
128019012/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
plt.ylim(0, 100000)
a = pd.read_csv('/kaggle/input/batter/batter.csv')
a
a=a.head(50)
plt.plot(a['avg'], a['strike_rate'], 'o') | code |
128019012/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
plt.plot(cricketer['index'], cricketer['RG Sharma']) | code |
128019012/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
plt.ylim(0, 100000)
a = pd.read_csv('/kaggle/input/batter/batter.csv')
a
a=a.head(50)
plt.scatter(a['avg'], a['strike_rate'])
plt.title('analysis of avg and SR of top 50 batsman')
plt.xlabel('average')
plt.ylabel('strike rate') | code |
128019012/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
plt.plot(cricketer['index'], cricketer['RG Sharma'])
plt.plot(cricketer['index'], cricketer['V Kohli']) | code |
128019012/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
plt.plot(cricketer['index'], cricketer['RG Sharma'])
plt.plot(cricketer['index'], cricketer['V Kohli'])
plt.title('Rohit Sharma Vs King Kohli')
plt.xlabel('Season')
plt.ylabel('Runs') | code |
128019012/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
plt.plot(year, price)
plt.ylim(0, 100000)
plt.grid() | code |
128019012/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
a = pd.read_csv('/kaggle/input/batter/batter.csv')
a | code |
128019012/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
price = [46000, 56000, 60000, 54000, 70000, 4500000]
year = [2018, 2019, 2020, 2021, 2022, 2023]
plt.ylim(0, 100000)
a = pd.read_csv('/kaggle/input/batter/batter.csv')
a
a=a.head(50)
characters = ['naruto', 'kagashi', 'pain', 'sakura', 'lee']
dialogue = [5000, 4500, 6500, 350, 650]
plt.bar(characters, dialogue) | code |
128019012/cell_10 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
plt.plot(cricketer['index'], cricketer['RG Sharma'], color='Green', linestyle='dashed', linewidth=3, marker='o')
plt.plot(cricketer['index'], cricketer['V Kohli'], color='Orange', linestyle='dotted', linewidth=2, marker='.', markersize=8)
plt.title('Rohit Sharma Vs King Kohli')
plt.xlabel('Season')
plt.ylabel('Runs') | code |
128019012/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
price = [46000, 56000, 60000, 54000, 70000]
year = [2018, 2019, 2020, 2021, 2022]
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer
plt.plot(cricketer['index'], cricketer['RG Sharma'], color='Green', linestyle='dashed', linewidth=3, marker='o', label='Rohit')
plt.plot(cricketer['index'], cricketer['V Kohli'], color='Orange', linestyle='dotted', linewidth=2, marker='.', markersize=8, label='Virat')
plt.legend() | code |
128019012/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
cricketer = pd.read_csv('/kaggle/input/sharma-kohli/sharma-kohli.csv')
cricketer | code |
18121674/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from math import exp
from sklearn.metrics import accuracy_score
from random import randrange
import os
print(os.listdir('../input')) | code |
18121674/cell_8 | [
"text_plain_output_1.png"
] | from math import exp
from random import randrange
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
diabetes_df = pd.read_csv('../input/diabetes.csv')
diabetes_df = diabetes_df.values
diabetes_df
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
predicted = logistic_model.predict(X_test)
lr_accuracy = accuracy_score(y_test, predicted)
report = classification_report(y_test, predicted)
matrix = confusion_matrix(y_test, predicted)
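# From-scratch logistic regression: predict() returns sigmoid(intercept + sum(coef_i * x_i)) for a single row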
def predict(row, coefficients):
yhat = coefficients[0]
for i in range(len(row) - 1):
yhat += coefficients[i + 1] * row[i]
return 1.0 / (1.0 + exp(-yhat))
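# Stochastic gradient descent: after each row, update the intercept and coefficients from the prediction error scaled by the sigmoid derivative yhat * (1 - yhat)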
def coefficients_sgd(train, l_rate, n_epoch):
coef = [0.0 for i in range(len(train[0]))]
for epoch in range(n_epoch):
sum_error = 0
for row in train:
yhat = predict(row, coef)
error = row[-1] - yhat
sum_error += error ** 2
coef[0] = coef[0] + l_rate * error * yhat * (1.0 - yhat)
for i in range(len(row) - 1):
coef[i + 1] = coef[i + 1] + l_rate * error * yhat * (1.0 - yhat) * row[i]
return coef
def logistic_regression(train, test, l_rate, n_epoch):
predictions = []
coef = coefficients_sgd(train, l_rate, n_epoch)
for r in test:
yhat = predict(r, coef)
yhat = round(yhat)
predictions.append(yhat)
return predictions
def accuracy_metric(actual, predicted):
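    # Percentage of predictions that match the actual labels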
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
def dataset_minmax(dataset):
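    # Column-wise [min, max] pairs, used below for min-max normalization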
minmax = list()
for i in range(len(dataset[0])):
col_values = [row[i] for row in dataset]
value_min = min(col_values)
value_max = max(col_values)
minmax.append([value_min, value_max])
return minmax
def normalize_dataset(dataset, minmax):
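    # Rescale every column to the [0, 1] range in place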
for row in dataset:
for i in range(len(row)):
row[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])
def cross_validation_split(dataset, n_folds):
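    # Randomly partition the rows into n_folds folds without replacement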
dataset_split = list()
dataset_copy = list(dataset)
fold_size = int(len(dataset) / n_folds)
for i in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(dataset_copy))
fold.append(dataset_copy.pop(index))
dataset_split.append(fold)
return dataset_split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
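    # k-fold cross-validation: hold out each fold in turn, train on the rest, and collect accuracies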
folds = cross_validation_split(dataset, n_folds)
scores = list()
for i, fold in enumerate(folds):
train_set = list(folds)
del train_set[i]
train_set = sum(train_set, [])
test_set = list()
for row in fold:
row_copy = list(row)
test_set.append(row_copy)
row_copy[-1] = None
predicted = algorithm(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = accuracy_metric(actual, predicted)
scores.append(accuracy)
return scores
minmax = dataset_minmax(diabetes_df)
normalize_dataset(diabetes_df, minmax)
l_rate = 0.3
n_epoch = 100
n_folds = 3
scores = evaluate_algorithm(diabetes_df, logistic_regression, n_folds, l_rate, n_epoch)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores) / float(len(scores)))) | code |
18121674/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
diabetes_df = pd.read_csv('../input/diabetes.csv')
diabetes_df = diabetes_df.values
diabetes_df | code |
18121674/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
predicted = logistic_model.predict(X_test)
lr_accuracy = accuracy_score(y_test, predicted)
print('Logistic Regression Accuracy: {:.2f}%'.format(lr_accuracy * 100))
report = classification_report(y_test, predicted)
print(report)
matrix = confusion_matrix(y_test, predicted)
print(matrix) | code |
333798/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
import re
fanboy_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in fanboy_space_split for j in i if not '@' in j and (not '#' in j)]
about_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in about_space_split for j in i if not '@' in j and (not '#' in j)]
from sklearn.feature_extraction.text import CountVectorizer
fc_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
fanboy_counts = fc_vectorizer.fit_transform(fanboy_text).toarray()
ac_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
about_counts = ac_vectorizer.fit_transform(about_text).toarray()
from sklearn.decomposition import NMF
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
fanboy_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(fanboy_counts)
about_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(about_counts) | code |
333798/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys() | code |
333798/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
print(len(set(fanboy_data['username'])) / len(set(fanboy_handles)), len(set(about_data['username'])) / len(set(about_handles))) | code |
333798/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
from matplotlib import *
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
333798/cell_11 | [
"text_plain_output_1.png"
] | import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
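# Build mention edges as (tweet author, mentioned handle without the leading '@')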
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
print(1 / (float(fanboy_graph.order()) / float(fanboy_graph.size())))
print(1 / (float(about_graph.order()) / float(about_graph.size()))) | code |
333798/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
import re
fanboy_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in fanboy_space_split for j in i if not '@' in j and (not '#' in j)]
about_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in about_space_split for j in i if not '@' in j and (not '#' in j)]
from sklearn.feature_extraction.text import CountVectorizer
fc_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
fanboy_counts = fc_vectorizer.fit_transform(fanboy_text).toarray()
ac_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
about_counts = ac_vectorizer.fit_transform(about_text).toarray() | code
333798/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
fanboy_cc = nx.connected_component_subgraphs(fanboy_graph)
bet_cen = nx.betweenness_centrality([i for i in fanboy_cc][0])
fanboy_cc = nx.connected_component_subgraphs(fanboy_graph)
clo_cen = nx.closeness_centrality([i for i in fanboy_cc][0])
fig, ax = matplotlib.pyplot.subplots()
ax.scatter(list(clo_cen.values()), list(bet_cen.values()))
ax.set_ylim(0.04, 0.3)
ax.set_xlim(0.32, 0.45)
ax.set_xlabel('Closeness Centrality')
ax.set_ylabel('Betweenness Centrality')
ax.set_yscale('log')
for i, txt in enumerate(list(clo_cen.keys())):
ax.annotate(txt, (list(clo_cen.values())[i], list(bet_cen.values())[i])) | code |
333798/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
import re
fanboy_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in fanboy_space_split for j in i if not '@' in j and (not '#' in j)]
about_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in about_space_split for j in i if not '@' in j and (not '#' in j)]
from sklearn.feature_extraction.text import CountVectorizer
fc_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
fanboy_counts = fc_vectorizer.fit_transform(fanboy_text).toarray()
ac_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
about_counts = ac_vectorizer.fit_transform(about_text).toarray()
def print_top_words(model, feature_names, n_top_words):
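    # No-op placeholder: topic words are not printed in this cell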
pass
from sklearn.decomposition import NMF
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
fanboy_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(fanboy_counts)
about_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(about_counts)
fanboy_feature_names = fc_vectorizer.get_feature_names()
print_top_words(fanboy_nmf, fanboy_feature_names, n_top_words) | code |
333798/cell_12 | [
"text_plain_output_1.png"
] | import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
fanboy_cc = nx.connected_component_subgraphs(fanboy_graph)
bet_cen = nx.betweenness_centrality([i for i in fanboy_cc][0]) | code |
128044361/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/men-born-in-1960-from-7-regions-in-korea-2005-2009/Data.csv', sep=';')
incplot = data.income.hist(grid=False) | code |
128044361/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/men-born-in-1960-from-7-regions-in-korea-2005-2009/Data.csv', sep=';')
data.head() | code |
128044361/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('/kaggle/input/men-born-in-1960-from-7-regions-in-korea-2005-2009/Data.csv', sep=';')
data.describe() | code |
128015173/cell_42 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
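    # Build the "question: ... table: col1,col2,..." prompt the text2sql model expects and tokenize it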
table_prefix = 'table:'
question_prefix = 'question:'
join_table = ','.join(table)
inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
return input_ids
def inference(question: str, table: List[str]) -> str:
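    # Generate the SQL string with beam search and decode it back to plain text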
input_data = prepare_input(question=question, table=table)
input_data = input_data.to(model.device)
outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
return result
print(inference(question='billy tauzin ran unopposed in 1996', table=['district', 'incumbent', 'party', 'first elected', 'result', 'candidates'])) | code |
128015173/cell_21 | [
"text_plain_output_1.png"
] | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from typing import List
tokenizer = AutoTokenizer.from_pretrained('juierror/flan-t5-text2sql-with-schema')
model = AutoModelForSeq2SeqLM.from_pretrained('juierror/flan-t5-text2sql-with-schema')
def prepare_input(question: str, table: List[str]):
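    # Build the "question: ... table: col1,col2,..." prompt the text2sql model expects and tokenize it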
table_prefix = 'table:'
question_prefix = 'question:'
join_table = ','.join(table)
inputs = f'{question_prefix} {question} {table_prefix} {join_table}'
input_ids = tokenizer(inputs, max_length=700, return_tensors='pt').input_ids
return input_ids
def inference(question: str, table: List[str]) -> str:
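    # Generate the SQL string with beam search and decode it back to plain text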
input_data = prepare_input(question=question, table=table)
input_data = input_data.to(model.device)
outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
return result
print(inference(question='in alberta greens , the year 2008 was the only year were over 50 candidates were nominated. Is it true?', table=['election', 'of candidates nominated', 'of seats won', 'of total votes', '% of popular vote'])) | code