Dataset schema:
- path: string (length 13 to 17)
- screenshot_names: sequence (length 1 to 873)
- code: string (length 0 to 40.4k)
- cell_type: string (1 class)
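The records below follow this schema, one notebook cell per row. As a minimal sketch of how such a dump could be consumed (assuming an export as a JSON-lines file named cells.jsonl — both the filename and the format are hypothetical, not part of this dataset), the rows can be loaded and grouped per notebook like this:

import json

# Hypothetical export: one JSON object per line with the fields
# "path", "screenshot_names", "code", and "cell_type".
with open('cells.jsonl') as f:
    records = [json.loads(line) for line in f]

# Group cells by notebook id (the part of "path" before the slash).
by_notebook = {}
for rec in records:
    notebook_id = rec['path'].split('/')[0]
    by_notebook.setdefault(notebook_id, []).append(rec)

for notebook_id, cells in by_notebook.items():
    print(notebook_id, len(cells), 'cells')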
34150890/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd
import seaborn as sns
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras import regularizers
from keras.applications import VGG16
from keras import models
from keras import layers
from keras import optimizers

os.listdir('../input/isl-dataset-double-handed')
train_dir = '../input/isl-dataset-double-handed/ISL_Dataset'

def load_unique():
    # Collect one sample image per class folder for plotting.
    size_img = (224, 224)
    images_for_plot = []
    labels_for_plot = []
    for folder in os.listdir(train_dir):
        for file in os.listdir(train_dir + '/' + folder):
            filepath = train_dir + '/' + folder + '/' + file
            image = cv2.imread(filepath)
            final_img = cv2.resize(image, size_img)
            final_img = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
            images_for_plot.append(final_img)
            labels_for_plot.append(folder)
            break
    return images_for_plot, labels_for_plot

images_for_plot, labels_for_plot = load_unique()
print("unique_labels = ", labels_for_plot)

fig = plt.figure(figsize=(15, 15))

def plot_images(fig, image, label, row, col, index):
    fig.add_subplot(row, col, index)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)
    return

image_index = 0
row = 4
col = 6
for i in range(1, 25):
    plot_images(fig, images_for_plot[image_index], labels_for_plot[image_index], row, col, i)
    image_index = image_index + 1
plt.show()

l1 = []

def load_data():
    """Loads data and preprocess. Returns train and test data along with labels."""
    images = []
    labels = []
    size = (224, 224)
    for folder in os.listdir(train_dir):
        for image in os.listdir(train_dir + '/' + folder):
            temp_img = cv2.imread(train_dir + '/' + folder + '/' + image)
            temp_img = cv2.resize(temp_img, size)
            images.append(temp_img)
            labels.append(ord(folder) - 97)
    # Cast the whole array to float before scaling; writing floats back into a
    # uint8 array element-wise would silently truncate every scaled pixel to 0.
    images = np.array(images).astype('float32') / 255
    l1 = labels
    labels = keras.utils.to_categorical(labels)
    X_train, X_test, Y_train, Y_test = train_test_split(images, labels, test_size=0.25)
    return (X_train, X_test, Y_train, Y_test, l1)

X_train, X_test, Y_train, Y_test, l1 = load_data()
code
34150890/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import keras
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras import regularizers
from sklearn.model_selection import train_test_split
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
from keras.applications import VGG16
from keras import models
from keras import layers
from keras import optimizers

os.listdir('../input/isl-dataset-double-handed')
code
34150890/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras import layers
from keras import models
from keras import optimizers
from keras.applications import VGG16
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras import regularizers
from sklearn.model_selection import train_test_split
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd
import seaborn as sns

os.listdir('../input/isl-dataset-double-handed')
train_dir = '../input/isl-dataset-double-handed/ISL_Dataset'

def load_unique():
    # Collect one sample image per class folder for plotting.
    size_img = (224, 224)
    images_for_plot = []
    labels_for_plot = []
    for folder in os.listdir(train_dir):
        for file in os.listdir(train_dir + '/' + folder):
            filepath = train_dir + '/' + folder + '/' + file
            image = cv2.imread(filepath)
            final_img = cv2.resize(image, size_img)
            final_img = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
            images_for_plot.append(final_img)
            labels_for_plot.append(folder)
            break
    return images_for_plot, labels_for_plot

images_for_plot, labels_for_plot = load_unique()
print("unique_labels = ", labels_for_plot)

fig = plt.figure(figsize=(15, 15))

def plot_images(fig, image, label, row, col, index):
    fig.add_subplot(row, col, index)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)
    return

image_index = 0
row = 4
col = 6
for i in range(1, 25):
    plot_images(fig, images_for_plot[image_index], labels_for_plot[image_index], row, col, i)
    image_index = image_index + 1
plt.show()

l1 = []

def load_data():
    """Loads data and preprocess. Returns train and test data along with labels."""
    images = []
    labels = []
    size = (224, 224)
    for folder in os.listdir(train_dir):
        for image in os.listdir(train_dir + '/' + folder):
            temp_img = cv2.imread(train_dir + '/' + folder + '/' + image)
            temp_img = cv2.resize(temp_img, size)
            images.append(temp_img)
            labels.append(ord(folder) - 97)
    # Cast the whole array to float before scaling; writing floats back into a
    # uint8 array element-wise would silently truncate every scaled pixel to 0.
    images = np.array(images).astype('float32') / 255
    l1 = labels
    labels = keras.utils.to_categorical(labels)
    X_train, X_test, Y_train, Y_test = train_test_split(images, labels, test_size=0.25)
    return (X_train, X_test, Y_train, Y_test, l1)

def create_model1():
    vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze all VGG16 layers except the last four.
    for layer in vgg_conv.layers[:-4]:
        layer.trainable = False
    model = models.Sequential()
    model.add(vgg_conv)
    model.add(layers.Flatten())
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(26, activation='softmax'))
    model.compile(optimizer='adam', loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
    model.summary()
    return model

def fit_model():
    model_hist = model.fit(X_train, Y_train, batch_size=64, epochs=8, validation_split=0.15)
    return model_hist

# load_data() is called in an earlier cell; repeat it here so X_train/Y_train exist.
X_train, X_test, Y_train, Y_test, l1 = load_data()
model = create_model1()
curr_model_hist = fit_model()
code
34150890/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras import layers
from keras import models
from keras import optimizers
from keras.applications import VGG16
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras import regularizers
from sklearn.model_selection import train_test_split
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd
import seaborn as sns

os.listdir('../input/isl-dataset-double-handed')
train_dir = '../input/isl-dataset-double-handed/ISL_Dataset'

def load_unique():
    # Collect one sample image per class folder for plotting.
    size_img = (224, 224)
    images_for_plot = []
    labels_for_plot = []
    for folder in os.listdir(train_dir):
        for file in os.listdir(train_dir + '/' + folder):
            filepath = train_dir + '/' + folder + '/' + file
            image = cv2.imread(filepath)
            final_img = cv2.resize(image, size_img)
            final_img = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
            images_for_plot.append(final_img)
            labels_for_plot.append(folder)
            break
    return images_for_plot, labels_for_plot

images_for_plot, labels_for_plot = load_unique()
print("unique_labels = ", labels_for_plot)

fig = plt.figure(figsize=(15, 15))

def plot_images(fig, image, label, row, col, index):
    fig.add_subplot(row, col, index)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)
    return

image_index = 0
row = 4
col = 6
for i in range(1, 25):
    plot_images(fig, images_for_plot[image_index], labels_for_plot[image_index], row, col, i)
    image_index = image_index + 1
plt.show()

l1 = []

def load_data():
    """Loads data and preprocess. Returns train and test data along with labels."""
    images = []
    labels = []
    size = (224, 224)
    for folder in os.listdir(train_dir):
        for image in os.listdir(train_dir + '/' + folder):
            temp_img = cv2.imread(train_dir + '/' + folder + '/' + image)
            temp_img = cv2.resize(temp_img, size)
            images.append(temp_img)
            labels.append(ord(folder) - 97)
    # Cast the whole array to float before scaling; writing floats back into a
    # uint8 array element-wise would silently truncate every scaled pixel to 0.
    images = np.array(images).astype('float32') / 255
    l1 = labels
    labels = keras.utils.to_categorical(labels)
    X_train, X_test, Y_train, Y_test = train_test_split(images, labels, test_size=0.25)
    return (X_train, X_test, Y_train, Y_test, l1)

def create_model1():
    vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze all VGG16 layers except the last four.
    for layer in vgg_conv.layers[:-4]:
        layer.trainable = False
    model = models.Sequential()
    model.add(vgg_conv)
    model.add(layers.Flatten())
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(26, activation='softmax'))
    model.compile(optimizer='adam', loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
    model.summary()
    return model

def fit_model():
    model_hist = model.fit(X_train, Y_train, batch_size=64, epochs=8, validation_split=0.15)
    return model_hist

# load_data() is called in an earlier cell; repeat it here so X_train/Y_train exist.
X_train, X_test, Y_train, Y_test, l1 = load_data()
model = create_model1()
curr_model_hist = fit_model()

evaluate_metrics = model.evaluate(X_test, Y_test)
print('\nEvaluation Accuracy = ', '{:.2f}%'.format(evaluate_metrics[1] * 100),
      '\nEvaluation loss = ', '{:.6f}'.format(evaluate_metrics[0]))
code
34150890/cell_5
[ "image_output_2.png", "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
import keras
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras import regularizers
from sklearn.model_selection import train_test_split
import seaborn as sns
from keras.applications import VGG16
from keras import models
from keras import layers
from keras import optimizers

os.listdir('../input/isl-dataset-double-handed')
train_dir = '../input/isl-dataset-double-handed/ISL_Dataset'

def load_unique():
    # Collect one sample image per class folder for plotting.
    size_img = (224, 224)
    images_for_plot = []
    labels_for_plot = []
    for folder in os.listdir(train_dir):
        for file in os.listdir(train_dir + '/' + folder):
            filepath = train_dir + '/' + folder + '/' + file
            image = cv2.imread(filepath)
            final_img = cv2.resize(image, size_img)
            final_img = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
            images_for_plot.append(final_img)
            labels_for_plot.append(folder)
            break
    return (images_for_plot, labels_for_plot)

images_for_plot, labels_for_plot = load_unique()
print('unique_labels = ', labels_for_plot)

fig = plt.figure(figsize=(15, 15))

def plot_images(fig, image, label, row, col, index):
    fig.add_subplot(row, col, index)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)
    return

image_index = 0
row = 4
col = 6
for i in range(1, 25):
    plot_images(fig, images_for_plot[image_index], labels_for_plot[image_index], row, col, i)
    image_index = image_index + 1
plt.show()
code
89142914/cell_2
[ "text_html_output_1.png" ]
import warnings
warnings.filterwarnings('ignore')

import os  # os python utilities
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from plotly.subplots import make_subplots

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89142914/cell_19
[ "text_html_output_2.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fg, ax = plt.subplots(1, 2, figsize=(20, 7))
ax[0].plot(data['Open'], label='Open', color='green')
ax[0].set_xlabel('Date', size=15)
ax[0].set_ylabel('Price', size=15)
ax[0].legend()
ax[1].plot(data['Close'], label='Close', color='red')
ax[1].set_xlabel('Date', size=15)
ax[1].set_ylabel('Price', size=15)
ax[1].legend()
fg.show()

fig = go.Figure(data=go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']))

# Simple moving averages over 5, 20 and 50 sessions.
data['SMA5'] = data.Close.rolling(5).mean()
data['SMA20'] = data.Close.rolling(20).mean()
data['SMA50'] = data.Close.rolling(50).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.SMA5, line=dict(color='orange', width=1), name='SMA5'),
                      go.Scatter(x=data.Date, y=data.SMA20, line=dict(color='green', width=1), name='SMA20'),
                      go.Scatter(x=data.Date, y=data.SMA50, line=dict(color='blue', width=1), name='SMA50')])

# Exponential moving averages.
data['EMA5'] = data.Close.ewm(span=5, adjust=False).mean()
data['EMA20'] = data.Close.ewm(span=20, adjust=False).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.EMA5, line=dict(color='orange', width=1), name='EMA5'),
                      go.Scatter(x=data.Date, y=data.EMA20, line=dict(color='green', width=1), name='EMA20')])

def bollinger_bands(df, n, m):
    # Typical price is the basis for the bands.
    TP = (df['High'] + df['Low'] + df['Close']) / 3
    data = TP
    B_MA = pd.Series(data.rolling(n, min_periods=n).mean(), name='B_MA')
    sigma = data.rolling(n, min_periods=n).std()
    BU = pd.Series(B_MA + m * sigma, name='BU')
    BL = pd.Series(B_MA - m * sigma, name='BL')
    df = df.join(B_MA)
    df = df.join(BU)
    df = df.join(BL)
    return df

df = bollinger_bands(data, 20, 2)

plt.figure(figsize=(15, 5))
plt.plot(df['Date'], df['Adj Close'])
plt.title('Price chart (Adj Close) IBEX')
plt.show()

plt.figure(figsize=(15, 5))
plt.title('Bollinger Bands chart IBEX')
plt.plot(df['Date'], df['Adj Close'])
plt.plot(df['Date'], df['BU'], alpha=0.3)
plt.plot(df['Date'], df['BL'], alpha=0.3)
plt.plot(df['Date'], df['B_MA'], alpha=0.3)
plt.fill_between(df['Date'], df['BU'], df['BL'], color='grey', alpha=0.1)
plt.show()
code
89142914/cell_7
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')
data.head()
code
89142914/cell_18
[ "image_output_1.png" ]
import pandas as pd
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fig = go.Figure(data=go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']))

data['SMA5'] = data.Close.rolling(5).mean()
data['SMA20'] = data.Close.rolling(20).mean()
data['SMA50'] = data.Close.rolling(50).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.SMA5, line=dict(color='orange', width=1), name='SMA5'),
                      go.Scatter(x=data.Date, y=data.SMA20, line=dict(color='green', width=1), name='SMA20'),
                      go.Scatter(x=data.Date, y=data.SMA50, line=dict(color='blue', width=1), name='SMA50')])

data['EMA5'] = data.Close.ewm(span=5, adjust=False).mean()
data['EMA20'] = data.Close.ewm(span=20, adjust=False).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.EMA5, line=dict(color='orange', width=1), name='EMA5'),
                      go.Scatter(x=data.Date, y=data.EMA20, line=dict(color='green', width=1), name='EMA20')])

def bollinger_bands(df, n, m):
    # Typical price is the basis for the bands.
    TP = (df['High'] + df['Low'] + df['Close']) / 3
    data = TP
    B_MA = pd.Series(data.rolling(n, min_periods=n).mean(), name='B_MA')
    sigma = data.rolling(n, min_periods=n).std()
    BU = pd.Series(B_MA + m * sigma, name='BU')
    BL = pd.Series(B_MA - m * sigma, name='BL')
    df = df.join(B_MA)
    df = df.join(BU)
    df = df.join(BL)
    return df

df = bollinger_bands(data, 20, 2)
print(df.tail())
code
89142914/cell_8
[ "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fg, ax = plt.subplots(1, 2, figsize=(20, 7))
ax[0].plot(data['Open'], label='Open', color='green')
ax[0].set_xlabel('Date', size=15)
ax[0].set_ylabel('Price', size=15)
ax[0].legend()
ax[1].plot(data['Close'], label='Close', color='red')
ax[1].set_xlabel('Date', size=15)
ax[1].set_ylabel('Price', size=15)
ax[1].legend()
fg.show()
code
89142914/cell_16
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fig = go.Figure(data=go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']))

data['SMA5'] = data.Close.rolling(5).mean()
data['SMA20'] = data.Close.rolling(20).mean()
data['SMA50'] = data.Close.rolling(50).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.SMA5, line=dict(color='orange', width=1), name='SMA5'),
                      go.Scatter(x=data.Date, y=data.SMA20, line=dict(color='green', width=1), name='SMA20'),
                      go.Scatter(x=data.Date, y=data.SMA50, line=dict(color='blue', width=1), name='SMA50')])

data['EMA5'] = data.Close.ewm(span=5, adjust=False).mean()
data['EMA20'] = data.Close.ewm(span=20, adjust=False).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.EMA5, line=dict(color='orange', width=1), name='EMA5'),
                      go.Scatter(x=data.Date, y=data.EMA20, line=dict(color='green', width=1), name='EMA20')])

# Candlestick chart with traded volume on a secondary axis.
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Candlestick(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']), secondary_y=True)
fig.add_trace(go.Bar(x=data['Date'], y=data['Volume']), secondary_y=False)
fig.layout.yaxis2.showgrid = False
fig.show()
code
89142914/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')
data.head()
code
89142914/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fig = go.Figure(data=go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']))

data['SMA5'] = data.Close.rolling(5).mean()
data['SMA20'] = data.Close.rolling(20).mean()
data['SMA50'] = data.Close.rolling(50).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.SMA5, line=dict(color='orange', width=1), name='SMA5'),
                      go.Scatter(x=data.Date, y=data.SMA20, line=dict(color='green', width=1), name='SMA20'),
                      go.Scatter(x=data.Date, y=data.SMA50, line=dict(color='blue', width=1), name='SMA50')])

data['EMA5'] = data.Close.ewm(span=5, adjust=False).mean()
data['EMA20'] = data.Close.ewm(span=20, adjust=False).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.EMA5, line=dict(color='orange', width=1), name='EMA5'),
                      go.Scatter(x=data.Date, y=data.EMA20, line=dict(color='green', width=1), name='EMA20')])
fig.show()
code
89142914/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fig = go.Figure(data=go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']))
fig.show()
code
89142914/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')

fig = go.Figure(data=go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close']))

data['SMA5'] = data.Close.rolling(5).mean()
data['SMA20'] = data.Close.rolling(20).mean()
data['SMA50'] = data.Close.rolling(50).mean()
fig = go.Figure(data=[go.Ohlc(x=data['Date'], open=data['Open'], high=data['High'], low=data['Low'], close=data['Close'], name='OHLC'),
                      go.Scatter(x=data.Date, y=data.SMA5, line=dict(color='orange', width=1), name='SMA5'),
                      go.Scatter(x=data.Date, y=data.SMA20, line=dict(color='green', width=1), name='SMA20'),
                      go.Scatter(x=data.Date, y=data.SMA50, line=dict(color='blue', width=1), name='SMA50')])
fig.show()
code
89142914/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/ibex3519942020/IBEX-2021.csv')
data.info()
code
32063570/cell_21
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

# x_test / y_test come from a train/test split in an earlier cell; recreate a
# held-out split here so the cell runs standalone (the original notebook's
# exact feature selection is not shown in this record).
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # fit on the training split; fitting on x_test was a bug
y_pred = lgr.predict(x_test)

cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data=cm, columns=['Predicted:0', 'Predicted:1'], index=['Actual:0', 'Actual:1'])
plt.figure(figsize=(8, 5))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='YlGnBu')
code
32063570/cell_13
[ "image_output_1.png" ]
from statsmodels.tools import add_constant
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import statsmodels.api as sm

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

df_constant = add_constant(df)
# Newer scipy versions removed stats.chisqprob, which statsmodels' summary still calls.
st.chisqprob = lambda chisq, df: st.chi2.sf(chisq, df)
cols = df_constant.columns[:-1]
model = sm.Logit(df.TenYearCHD, df_constant[cols])
r = model.fit()
r.summary()
code
32063570/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()
sns.countplot(x='TenYearCHD', data=df)
code
32063570/cell_25
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

# x_test / y_test come from a train/test split in an earlier cell; recreate a
# held-out split here so the cell runs standalone (the original notebook's
# exact feature selection is not shown in this record).
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # fit on the training split; fitting on x_test was a bug
y_pred = lgr.predict(x_test)

cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data=cm, columns=['Predicted:0', 'Predicted:1'], index=['Actual:0', 'Actual:1'])

y_pred_prob = lgr.predict_proba(x_test)[:, :]
y_pred_prob_df = pd.DataFrame(data=y_pred_prob, columns=['No Heart Disease (0)', 'Heart Disease (1)'])
y_pred_prob_df.head()
code
32063570/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
code
32063570/cell_23
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

# x_test / y_test come from a train/test split in an earlier cell; recreate a
# held-out split here so the cell runs standalone (the original notebook's
# exact feature selection is not shown in this record).
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # fit on the training split; fitting on x_test was a bug
y_pred = lgr.predict(x_test)

cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data=cm, columns=['Predicted:0', 'Predicted:1'], index=['Actual:0', 'Actual:1'])

TN = cm[0, 0]
TP = cm[1, 1]
FN = cm[1, 0]
FP = cm[0, 1]
sensitivity = TP / float(TP + FN)
specificity = TN / float(TN + FP)
print('The accuracy of the model = TP+TN/(TP+TN+FP+FN) = ', (TP + TN) / float(TP + TN + FP + FN), '\n',
      'The Missclassification = 1-Accuracy = ', 1 - (TP + TN) / float(TP + TN + FP + FN), '\n',
      'Sensitivity or True Positive Rate = TP/(TP+FN) = ', TP / float(TP + FN), '\n',
      'Specificity or True Negative Rate = TN/(TN+FP) = ', TN / float(TN + FP), '\n',
      'Positive Predictive value = TP/(TP+FP) = ', TP / float(TP + FP), '\n',
      'Negative predictive Value = TN/(TN+FN) = ', TN / float(TN + FN), '\n',
      'Positive Likelihood Ratio = Sensitivity/(1-Specificity) = ', sensitivity / (1 - specificity), '\n',
      'Negative likelihood Ratio = (1-Sensitivity)/Specificity = ', (1 - sensitivity) / specificity)
code
32063570/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import binarize
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sklearn

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

# x_test / y_test come from a train/test split in an earlier cell; recreate a
# held-out split here so the cell runs standalone (the original notebook's
# exact feature selection is not shown in this record).
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # fit on the training split; fitting on x_test was a bug
y_pred = lgr.predict(x_test)
sklearn.metrics.accuracy_score(y_test, y_pred)

cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data=cm, columns=['Predicted:0', 'Predicted:1'], index=['Actual:0', 'Actual:1'])

y_pred_prob = lgr.predict_proba(x_test)[:, :]
y_pred_prob_df = pd.DataFrame(data=y_pred_prob, columns=['No Heart Disease (0)', 'Heart Disease (1)'])

for i in range(1, 5):
    cm2 = 0
    y_pred_prob_yes = lgr.predict_proba(x_test)
    # binarize takes the cut-off as the keyword argument `threshold`.
    y_pred2 = binarize(y_pred_prob_yes, threshold=i / 10)[:, 1]
    cm2 = confusion_matrix(y_test, y_pred2)

sklearn.metrics.roc_auc_score(y_test, y_pred_prob_yes[:, 1])
code
32063570/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.head()
code
32063570/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()
df.describe()
code
32063570/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import sklearn

# df is loaded and cleaned in earlier cells of this notebook; recreate the
# held-out split here so the model is fit on training data, not on the test set.
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # the original fit on x_test trained on the test split
y_pred = lgr.predict(x_test)
sklearn.metrics.accuracy_score(y_test, y_pred)
code
32063570/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
import matplotlib.mlab as mlab
code
32063570/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
code
32063570/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import binarize
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

# x_test / y_test come from a train/test split in an earlier cell; recreate a
# held-out split here so the cell runs standalone (the original notebook's
# exact feature selection is not shown in this record).
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # fit on the training split; fitting on x_test was a bug
y_pred = lgr.predict(x_test)

cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data=cm, columns=['Predicted:0', 'Predicted:1'], index=['Actual:0', 'Actual:1'])

y_pred_prob = lgr.predict_proba(x_test)[:, :]
y_pred_prob_df = pd.DataFrame(data=y_pred_prob, columns=['No Heart Disease (0)', 'Heart Disease (1)'])

for i in range(1, 5):
    cm2 = 0
    y_pred_prob_yes = lgr.predict_proba(x_test)
    # binarize takes the cut-off as the keyword argument `threshold`.
    y_pred2 = binarize(y_pred_prob_yes, threshold=i / 10)[:, 1]
    cm2 = confusion_matrix(y_test, y_pred2)

fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob_yes[:, 1])
plt.plot(fpr, tpr)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title('ROC curve for Heart disease classifier')
plt.xlabel('False positive rate (1-Specificity)')
plt.ylabel('True positive rate (Sensitivity)')
plt.grid(True)
code
32063570/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()
code
32063570/cell_14
[ "text_plain_output_1.png" ]
from statsmodels.tools import add_constant
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as st
import statsmodels.api as sm

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

df_constant = add_constant(df)
# Newer scipy versions removed stats.chisqprob, which statsmodels' summary still calls.
st.chisqprob = lambda chisq, df: st.chi2.sf(chisq, df)
cols = df_constant.columns[:-1]
model = sm.Logit(df.TenYearCHD, df_constant[cols])
r = model.fit()
r.summary()

# Odds ratios with 95% confidence intervals and p-values.
p = np.exp(r.params)
conf = np.exp(r.conf_int())
conf['OR'] = p
pv = round(r.pvalues, 3)
conf['pvalue'] = pv
conf.columns = ['CI 95%(2.5%)', 'CI 95%(97.5%)', 'Odds Ratio', 'pvalue']
print(conf)
code
32063570/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()
sns.pairplot(df)
code
32063570/cell_27
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import binarize
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

# x_test / y_test come from a train/test split in an earlier cell; recreate a
# held-out split here so the cell runs standalone (the original notebook's
# exact feature selection is not shown in this record).
x = df.drop('TenYearCHD', axis=1)
y = df['TenYearCHD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)

lgr = LogisticRegression()
lgr.fit(x_train, y_train)  # fit on the training split; fitting on x_test was a bug
y_pred = lgr.predict(x_test)

cm = confusion_matrix(y_test, y_pred)
conf_matrix = pd.DataFrame(data=cm, columns=['Predicted:0', 'Predicted:1'], index=['Actual:0', 'Actual:1'])

y_pred_prob = lgr.predict_proba(x_test)[:, :]
y_pred_prob_df = pd.DataFrame(data=y_pred_prob, columns=['No Heart Disease (0)', 'Heart Disease (1)'])

for i in range(1, 5):
    cm2 = 0
    y_pred_prob_yes = lgr.predict_proba(x_test)
    # binarize takes the cut-off as the keyword argument `threshold`.
    y_pred2 = binarize(y_pred_prob_yes, threshold=i / 10)[:, 1]
    cm2 = confusion_matrix(y_test, y_pred2)
    print('With', i / 10, 'threshold the Confusion Matrix is ', '\n', cm2, '\n',
          'with', cm2[0, 0] + cm2[1, 1], 'correct predictions and', cm2[1, 0], 'Type II errors( False Negatives)', '\n\n',
          'Sensitivity: ', cm2[1, 1] / float(cm2[1, 1] + cm2[1, 0]),
          'Specificity: ', cm2[0, 0] / float(cm2[0, 0] + cm2[0, 1]), '\n\n\n')
code
32063570/cell_12
[ "text_plain_output_1.png" ]
from statsmodels.tools import add_constant
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    count = count + 1
df.dropna(axis=0, inplace=True)

def draw_histograms(dataframe, features, rows, cols):
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        a = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=a, facecolor='green')
        a.set_title(feature + ' Distribution', color='blue')
    fig.tight_layout()
    plt.show()

draw_histograms(df, df.columns, 6, 3)
df.TenYearCHD.value_counts()

df_constant = add_constant(df)
df_constant.head()
code
32063570/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/framingham.csv')
df.drop(['education'], axis=1, inplace=True)
df.rename(columns={'male': 'Sex_male'}, inplace=True)
df.isnull().sum()
count = 0
for i in df.isnull().sum(axis=1):
    if i > 0:  # count only rows that actually contain missing values
        count = count + 1
print('Total number of rows with missing values:', count)
code
90118273/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')

missing_values_train = train.isna().any().sum()
missing_values_test = test.isna().any().sum()

duplicates_train = train.duplicated().sum()
print('Duplicates in train data: {0}'.format(duplicates_train))
duplicates_test = test.duplicated().sum()
print('Duplicates in test data: {0}'.format(duplicates_test))
code
90118273/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')

print('Train data shape:', train.shape)
print('Test data shape:', test.shape)
code
90118273/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')

train.describe()
code
90118273/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')

# Per-column missing-value counts; the original .isna().any().sum() collapses
# to a scalar that cannot be filtered with a boolean mask below.
missing_values_train = train.isna().sum()
print('Missing values in train data: {0}'.format(missing_values_train[missing_values_train > 0]))
missing_values_test = test.isna().sum()
print('Missing values in test data: {0}'.format(missing_values_test[missing_values_test > 0]))
code
90118273/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')

print('Columns: \n{0}'.format(list(train.columns)))
code
90118273/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')

train.head()
code
128005453/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # needed for plt.show() below
import pandas as pd
import seaborn as sns

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

g = sns.pairplot(hrp, kind='reg', diag_kws={'color': 'red'})
g.fig.suptitle('Correlation of House rent prediction Dataset', y=1.08)
plt.show()
code
128005453/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['Furnishing Status'].value_counts()
code
128005453/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['Area Locality'].value_counts()
code
128005453/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.describe()
code
128005453/cell_34
[ "text_html_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
model.fvalue
model.conf_int()
model.f_pvalue
model.tvalues
code
128005453/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # needed for plt.show() below
import pandas as pd
import seaborn as sns

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

g = sns.pairplot(hrp, kind='reg', diag_kws={'color': 'red'})
g.fig.suptitle('Correlation of House rent prediction Dataset', y=1.08)
plt.show()

sns.pairplot(hrp, hue='Rent', corner=True)
code
128005453/cell_30
[ "image_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
print('f_pvalue: ', '%.4f' % model.f_pvalue)
code
128005453/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
model.fvalue
model.conf_int()
model.f_pvalue
code
128005453/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()
code
128005453/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
code
128005453/cell_29
[ "image_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
code
128005453/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
code
128005453/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.head()
code
128005453/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['City'].value_counts()
code
128005453/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp['City'].value_counts().plot.pie()
code
128005453/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
code
128005453/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp['Area Locality'].value_counts()
code
128005453/cell_32
[ "text_html_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
model.fvalue
model.conf_int()
code
128005453/cell_28
[ "text_html_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
code
128005453/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['Floor'].value_counts()
code
128005453/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
code
128005453/cell_3
[ "image_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.tail()
code
128005453/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
code
128005453/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
model.fvalue
model.conf_int()
model.f_pvalue
model.tvalues
model.mse_model
code
128005453/cell_31
[ "text_html_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
model.summary().tables[1]
model.f_pvalue
model.fvalue
code
128005453/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
X.head()
code
128005453/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['Point of Contact'].value_counts()
code
128005453/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # needed for plt.show() below
import pandas as pd
import seaborn as sns

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

g = sns.pairplot(hrp, kind='reg', diag_kws={'color': 'red'})
g.fig.suptitle('Correlation of House rent prediction Dataset', y=1.08)
plt.show()

sns.jointplot(x='BHK', y='Rent', data=hrp, kind='reg', color='green')
plt.show()
code
128005453/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['Area Type'].value_counts()
code
128005453/cell_27
[ "image_output_1.png" ]
import pandas as pd
import statsmodels.api as sm

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp.isna()
hrp.isna().sum()
hrp.corr()

X = hrp.BHK
X = sm.add_constant(X)
y = hrp.Rent
slr = sm.OLS(y, X)
model = slr.fit()
model.summary()
model.params
code
128005453/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.dtypes
hrp.shape
hrp['Tenant Preferred'].value_counts()
code
128005453/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

hrp = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
hrp.info()
code
105201509/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

movies = pd.read_csv('/kaggle/input/movies/Movies.csv')
movies

# Converting a column of the dataframe to a list
genres_list = movies["genres"].head(10).to_list()
language_list = movies["language"].unique()
movies["budget"] = movies["budget"].astype(int)
budget_value = movies["budget"].head(5).to_list()
language_list
code
105201509/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

movies = pd.read_csv('/kaggle/input/movies/Movies.csv')
movies
code
105201509/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105201509/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

movies = pd.read_csv('/kaggle/input/movies/Movies.csv')
movies
movies.columns.to_list().index('imdb_score')
code
105201509/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

movies = pd.read_csv('/kaggle/input/movies/Movies.csv')
movies
Gross_amount = movies['gross'].sort_values(ascending=False)
Gross_amount.head(10)
code
105201509/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

movies = pd.read_csv('/kaggle/input/movies/Movies.csv')
movies

# Converting a column of the dataframe to a list
genres_list = movies["genres"].head(10).to_list()
language_list = movies["language"].unique()
movies["budget"] = movies["budget"].astype(int)
budget_value = movies["budget"].head(5).to_list()
genres_list
code
88086475/cell_21
[ "text_plain_output_1.png" ]
# Generates metadata for test images.
path_to_test_metadata = "/kaggle/working/test.csv"
!echo "image,species,individual_id" > {path_to_test_metadata}
!ls {path_to_inputs}/test_images | sed "s/.jpg/.jpg,unknown,unknown/g" >> {path_to_test_metadata}

# Shows contents of generated metadata.
!head {path_to_test_metadata}
code
88086475/cell_13
[ "text_plain_output_1.png" ]
!head {path_to_inputs}/sample_submission.csv
code
88086475/cell_25
[ "text_plain_output_1.png" ]
# Installs required libraries.
!pip install numpy
!pip install pandas
!pip install keras
!pip install Pillow
!pip install imagehash
!pip install sewar
!pip install plotly
code
88086475/cell_33
[ "text_plain_output_1.png" ]
# path_to_inputs is defined in an earlier cell of the notebook.
path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)

all_species = whale_and_dolphin.getAllSpecies()
print('Number of species:')
print(len(all_species))
print()
print('Name of species:')
print(all_species)
print()
code
88086475/cell_44
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "text_plain_output_5.png", "text_plain_output_30.png", "text_plain_output_15.png", "image_output_17.png", "image_output_30.png", "text_plain_output_9.png", "image_output_14.png", "image_output_28.png", "text_plain_output_20.png", "image_output_23.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "image_output_18.png", "text_plain_output_29.png", "image_output_21.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "image_output_20.png", "text_plain_output_18.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_22.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_16.png", "image_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "image_output_27.png", "image_output_6.png", "text_plain_output_23.png", "image_output_12.png", "text_plain_output_28.png", "image_output_22.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_29.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_15.png", "image_output_9.png", "image_output_19.png", "image_output_26.png" ]
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd

# Defines a class that loads the metadata and images and processes them.
class WhaleAndDolphin():

    def __init__(self, path_to_metadata, path_to_dir_images):
        self._path_to_metadata = path_to_metadata
        self._path_to_dir_images = path_to_dir_images
        self._metadata = pd.read_csv(path_to_metadata)

    def getAllSpecies(self):
        return self._metadata["species"].unique()

    def sliceMetadata(self, query):
        return self._metadata.query(query).reset_index(drop=True)

    def getAllIndividualIDs(self, metadata):
        return metadata["individual_id"].unique()

    def showImagesTile(self, metadata, num_cols=3):
        num_rows = len(metadata) // num_cols + 1
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))
        for row in metadata.itertuples():
            title, image = self.getImage(row)
            ax = fig.add_subplot(num_rows, num_cols, row.Index + 1)
            ax.set_title(title)
            plt.imshow(image)
        plt.show()
        plt.clf()
        plt.close()

    def getImage(self, metadata_row):
        title = "%s (%s)" % (metadata_row.individual_id,
                             metadata_row.species)
        path_to_image = self._pathToImage(
            self._path_to_dir_images,
            file_name=metadata_row.image
        )
        image = Image.open(path_to_image)
        return title, image

    def _pathToImage(self, path_to_dir_images, file_name):
        return "%s/%s" % (path_to_dir_images, file_name)

    def showIndividualImagesTile(self, metadata, num_cols=3, num_individuals=10):
        metadata_sorted = \
            metadata.sort_values(by=["individual_id"]).reset_index(drop=True)

        num_rows = num_cols * num_individuals
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))

        individual_id_prev = ""
        i_row, i_col = 0, 0
        for row in metadata_sorted.itertuples():
            if row.individual_id != individual_id_prev:
                # Moves to the next grid row when a new individual is found.
                i_row += 1
                i_col = 1
                i = (i_row - 1) * num_cols + i_col
                individual_id_prev = row.individual_id
                if i_row > num_individuals:
                    break
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            elif i_col < num_cols:
                # Moves to the next column while fewer than num_cols images of
                # the same individual have been shown.
                i_col += 1
                i = (i_row - 1) * num_cols + i_col
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            else:
                # Skips any further images of the same individual.
                continue
        plt.show()
        plt.clf()
        plt.close()

path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
stats = pd.DataFrame(columns=['num_of_images', 'num_of_individuals'], index=all_species)
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
    num_individuals = len(individual_ids)
    stats.loc[species] = [num_images, num_individuals]
stats.loc['total'] = [stats['num_of_images'].sum(), stats['num_of_individuals'].sum()]
species = 'melon_headed_whale'
individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
stats = pd.DataFrame(columns=['num_of_images'], index=individual_ids)
for individual_id in individual_ids:
    metadata_individual = metadata[species].query('individual_id == @individual_id').reset_index(drop=True)
    num_images = len(metadata_individual)
    stats.loc[individual_id] = [num_images]
stats.loc['total'] = [stats['num_of_images'].sum()]
for species in all_species:
    print('Images for %s :' % species)
    whale_and_dolphin.showIndividualImagesTile(metadata=metadata[species], num_cols=4, num_individuals=10)
    print()
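# A minimal alternative sketch, not in the original notebook: since getImage
# reads the 'image', 'species', and 'individual_id' columns above, the same
# per-species stats table can likely be built in one pandas groupby pass.
stats_alt = whale_and_dolphin._metadata.groupby('species').agg(
    num_of_images=('image', 'count'),
    num_of_individuals=('individual_id', 'nunique'),
)
print(stats_alt.head())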
code
88086475/cell_41
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd

# Defines a class that loads the metadata and images and processes them.
class WhaleAndDolphin():

    def __init__(self, path_to_metadata, path_to_dir_images):
        self._path_to_metadata = path_to_metadata
        self._path_to_dir_images = path_to_dir_images
        self._metadata = pd.read_csv(path_to_metadata)

    def getAllSpecies(self):
        return self._metadata["species"].unique()

    def sliceMetadata(self, query):
        return self._metadata.query(query).reset_index(drop=True)

    def getAllIndividualIDs(self, metadata):
        return metadata["individual_id"].unique()

    def showImagesTile(self, metadata, num_cols=3):
        num_rows = len(metadata) // num_cols + 1
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))
        for row in metadata.itertuples():
            title, image = self.getImage(row)
            ax = fig.add_subplot(num_rows, num_cols, row.Index + 1)
            ax.set_title(title)
            plt.imshow(image)
        plt.show()
        plt.clf()
        plt.close()

    def getImage(self, metadata_row):
        title = "%s (%s)" % (metadata_row.individual_id,
                             metadata_row.species)
        path_to_image = self._pathToImage(
            self._path_to_dir_images,
            file_name=metadata_row.image
        )
        image = Image.open(path_to_image)
        return title, image

    def _pathToImage(self, path_to_dir_images, file_name):
        return "%s/%s" % (path_to_dir_images, file_name)

    def showIndividualImagesTile(self, metadata, num_cols=3, num_individuals=10):
        metadata_sorted = \
            metadata.sort_values(by=["individual_id"]).reset_index(drop=True)

        num_rows = num_cols * num_individuals
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))

        individual_id_prev = ""
        i_row, i_col = 0, 0
        for row in metadata_sorted.itertuples():
            if row.individual_id != individual_id_prev:
                # Moves to the next grid row when a new individual is found.
                i_row += 1
                i_col = 1
                i = (i_row - 1) * num_cols + i_col
                individual_id_prev = row.individual_id
                if i_row > num_individuals:
                    break
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            elif i_col < num_cols:
                # Moves to the next column while fewer than num_cols images of
                # the same individual have been shown.
                i_col += 1
                i = (i_row - 1) * num_cols + i_col
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            else:
                # Skips any further images of the same individual.
                continue
        plt.show()
        plt.clf()
        plt.close()

path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
stats = pd.DataFrame(columns=['num_of_images', 'num_of_individuals'], index=all_species)
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
    num_individuals = len(individual_ids)
    stats.loc[species] = [num_images, num_individuals]
stats.loc['total'] = [stats['num_of_images'].sum(), stats['num_of_individuals'].sum()]
species = 'melon_headed_whale'
individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
stats = pd.DataFrame(columns=['num_of_images'], index=individual_ids)
for individual_id in individual_ids:
    metadata_individual = metadata[species].query('individual_id == @individual_id').reset_index(drop=True)
    num_images = len(metadata_individual)
    stats.loc[individual_id] = [num_images]
stats.loc['total'] = [stats['num_of_images'].sum()]
pd.set_option('display.max_rows', None)
print('Number of images for each individual of %s:' % species)
print('(Number of individuals for %s: %d)' % (species, len(stats) - 1))
stats
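# Hedged alternative, not in the original cell: pd.option_context scopes the
# display setting to a single block instead of changing it globally.
with pd.option_context('display.max_rows', None):
    print(stats)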
code
88086475/cell_11
[ "text_plain_output_1.png" ]
path_to_inputs = "/kaggle/input/happy-whale-and-dolphin" !ls {path_to_inputs}
code
88086475/cell_19
[ "text_plain_output_1.png" ]
!echo "Number of train_images:" !ls {path_to_inputs}/train_images | cat -n | tail -1 | cut -f1 !echo "" !echo "Number of test_images:" !ls {path_to_inputs}/test_images | cat -n | tail -1 | cut -f1
code
88086475/cell_50
[ "text_plain_output_1.png" ]
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd

# Defines a class that loads the metadata and images and processes them.
class WhaleAndDolphin():

    def __init__(self, path_to_metadata, path_to_dir_images):
        self._path_to_metadata = path_to_metadata
        self._path_to_dir_images = path_to_dir_images
        self._metadata = pd.read_csv(path_to_metadata)

    def getAllSpecies(self):
        return self._metadata["species"].unique()

    def sliceMetadata(self, query):
        return self._metadata.query(query).reset_index(drop=True)

    def getAllIndividualIDs(self, metadata):
        return metadata["individual_id"].unique()

    def showImagesTile(self, metadata, num_cols=3):
        num_rows = len(metadata) // num_cols + 1
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))
        for row in metadata.itertuples():
            title, image = self.getImage(row)
            ax = fig.add_subplot(num_rows, num_cols, row.Index + 1)
            ax.set_title(title)
            plt.imshow(image)
        plt.show()
        plt.clf()
        plt.close()

    def getImage(self, metadata_row):
        title = "%s (%s)" % (metadata_row.individual_id,
                             metadata_row.species)
        path_to_image = self._pathToImage(
            self._path_to_dir_images,
            file_name=metadata_row.image
        )
        image = Image.open(path_to_image)
        return title, image

    def _pathToImage(self, path_to_dir_images, file_name):
        return "%s/%s" % (path_to_dir_images, file_name)

    def showIndividualImagesTile(self, metadata, num_cols=3, num_individuals=10):
        metadata_sorted = \
            metadata.sort_values(by=["individual_id"]).reset_index(drop=True)

        num_rows = num_cols * num_individuals
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))

        individual_id_prev = ""
        i_row, i_col = 0, 0
        for row in metadata_sorted.itertuples():
            if row.individual_id != individual_id_prev:
                # Moves to the next grid row when a new individual is found.
                i_row += 1
                i_col = 1
                i = (i_row - 1) * num_cols + i_col
                individual_id_prev = row.individual_id
                if i_row > num_individuals:
                    break
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            elif i_col < num_cols:
                # Moves to the next column while fewer than num_cols images of
                # the same individual have been shown.
                i_col += 1
                i = (i_row - 1) * num_cols + i_col
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            else:
                # Skips any further images of the same individual.
                continue
        plt.show()
        plt.clf()
        plt.close()

path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
stats = pd.DataFrame(columns=['num_of_images', 'num_of_individuals'], index=all_species)
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
    num_individuals = len(individual_ids)
    stats.loc[species] = [num_images, num_individuals]
stats.loc['total'] = [stats['num_of_images'].sum(), stats['num_of_individuals'].sum()]
species = 'melon_headed_whale'
individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
stats = pd.DataFrame(columns=['num_of_images'], index=individual_ids)
for individual_id in individual_ids:
    metadata_individual = metadata[species].query('individual_id == @individual_id').reset_index(drop=True)
    num_images = len(metadata_individual)
    stats.loc[individual_id] = [num_images]
stats.loc['total'] = [stats['num_of_images'].sum()]
# path_to_test_metadata is assumed to be defined in an earlier notebook cell.
path_to_metadata = '%s' % path_to_test_metadata
path_to_dir_images = '%s/test_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
print('Number of species:')
print(len(all_species))
print()
print('All species:')
print(all_species)
print()
code
88086475/cell_51
[ "text_plain_output_1.png" ]
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd

# Defines a class that loads the metadata and images and processes them.
class WhaleAndDolphin():

    def __init__(self, path_to_metadata, path_to_dir_images):
        self._path_to_metadata = path_to_metadata
        self._path_to_dir_images = path_to_dir_images
        self._metadata = pd.read_csv(path_to_metadata)

    def getAllSpecies(self):
        return self._metadata["species"].unique()

    def sliceMetadata(self, query):
        return self._metadata.query(query).reset_index(drop=True)

    def getAllIndividualIDs(self, metadata):
        return metadata["individual_id"].unique()

    def showImagesTile(self, metadata, num_cols=3):
        num_rows = len(metadata) // num_cols + 1
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))
        for row in metadata.itertuples():
            title, image = self.getImage(row)
            ax = fig.add_subplot(num_rows, num_cols, row.Index + 1)
            ax.set_title(title)
            plt.imshow(image)
        plt.show()
        plt.clf()
        plt.close()

    def getImage(self, metadata_row):
        title = "%s (%s)" % (metadata_row.individual_id,
                             metadata_row.species)
        path_to_image = self._pathToImage(
            self._path_to_dir_images,
            file_name=metadata_row.image
        )
        image = Image.open(path_to_image)
        return title, image

    def _pathToImage(self, path_to_dir_images, file_name):
        return "%s/%s" % (path_to_dir_images, file_name)

    def showIndividualImagesTile(self, metadata, num_cols=3, num_individuals=10):
        metadata_sorted = \
            metadata.sort_values(by=["individual_id"]).reset_index(drop=True)

        num_rows = num_cols * num_individuals
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))

        individual_id_prev = ""
        i_row, i_col = 0, 0
        for row in metadata_sorted.itertuples():
            if row.individual_id != individual_id_prev:
                # Moves to the next grid row when a new individual is found.
                i_row += 1
                i_col = 1
                i = (i_row - 1) * num_cols + i_col
                individual_id_prev = row.individual_id
                if i_row > num_individuals:
                    break
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            elif i_col < num_cols:
                # Moves to the next column while fewer than num_cols images of
                # the same individual have been shown.
                i_col += 1
                i = (i_row - 1) * num_cols + i_col
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            else:
                # Skips any further images of the same individual.
                continue
        plt.show()
        plt.clf()
        plt.close()

path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
stats = pd.DataFrame(columns=['num_of_images', 'num_of_individuals'], index=all_species)
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
    num_individuals = len(individual_ids)
    stats.loc[species] = [num_images, num_individuals]
stats.loc['total'] = [stats['num_of_images'].sum(), stats['num_of_individuals'].sum()]
species = 'melon_headed_whale'
individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
stats = pd.DataFrame(columns=['num_of_images'], index=individual_ids)
for individual_id in individual_ids:
    metadata_individual = metadata[species].query('individual_id == @individual_id').reset_index(drop=True)
    num_images = len(metadata_individual)
    stats.loc[individual_id] = [num_images]
stats.loc['total'] = [stats['num_of_images'].sum()]
# path_to_test_metadata is assumed to be defined in an earlier notebook cell.
path_to_metadata = '%s' % path_to_test_metadata
path_to_dir_images = '%s/test_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
print('Number of images:')
print()
print('species, num_of_images')
metadata = {}
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    print('%s, %d' % (species, num_images))
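# Hedged one-liner equivalent, not in the original cell: the same per-species
# image counts via value_counts on the metadata loaded above.
print(whale_and_dolphin._metadata['species'].value_counts())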
code
88086475/cell_16
[ "text_plain_output_1.png" ]
!ls {path_to_inputs}/train_images | head
code
88086475/cell_17
[ "text_plain_output_1.png" ]
!ls {path_to_inputs}/test_images | head
code
88086475/cell_14
[ "text_plain_output_1.png" ]
!head {path_to_inputs}/train.csv
code
88086475/cell_53
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd

# Defines a class that loads the metadata and images and processes them.
class WhaleAndDolphin():

    def __init__(self, path_to_metadata, path_to_dir_images):
        self._path_to_metadata = path_to_metadata
        self._path_to_dir_images = path_to_dir_images
        self._metadata = pd.read_csv(path_to_metadata)

    def getAllSpecies(self):
        return self._metadata["species"].unique()

    def sliceMetadata(self, query):
        return self._metadata.query(query).reset_index(drop=True)

    def getAllIndividualIDs(self, metadata):
        return metadata["individual_id"].unique()

    def showImagesTile(self, metadata, num_cols=3):
        num_rows = len(metadata) // num_cols + 1
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))
        for row in metadata.itertuples():
            title, image = self.getImage(row)
            ax = fig.add_subplot(num_rows, num_cols, row.Index + 1)
            ax.set_title(title)
            plt.imshow(image)
        plt.show()
        plt.clf()
        plt.close()

    def getImage(self, metadata_row):
        title = "%s (%s)" % (metadata_row.individual_id,
                             metadata_row.species)
        path_to_image = self._pathToImage(
            self._path_to_dir_images,
            file_name=metadata_row.image
        )
        image = Image.open(path_to_image)
        return title, image

    def _pathToImage(self, path_to_dir_images, file_name):
        return "%s/%s" % (path_to_dir_images, file_name)

    def showIndividualImagesTile(self, metadata, num_cols=3, num_individuals=10):
        metadata_sorted = \
            metadata.sort_values(by=["individual_id"]).reset_index(drop=True)

        num_rows = num_cols * num_individuals
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))

        individual_id_prev = ""
        i_row, i_col = 0, 0
        for row in metadata_sorted.itertuples():
            if row.individual_id != individual_id_prev:
                # Moves to the next grid row when a new individual is found.
                i_row += 1
                i_col = 1
                i = (i_row - 1) * num_cols + i_col
                individual_id_prev = row.individual_id
                if i_row > num_individuals:
                    break
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            elif i_col < num_cols:
                # Moves to the next column while fewer than num_cols images of
                # the same individual have been shown.
                i_col += 1
                i = (i_row - 1) * num_cols + i_col
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            else:
                # Skips any further images of the same individual.
                continue
        plt.show()
        plt.clf()
        plt.close()

path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
stats = pd.DataFrame(columns=['num_of_images', 'num_of_individuals'], index=all_species)
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
    num_individuals = len(individual_ids)
    stats.loc[species] = [num_images, num_individuals]
stats.loc['total'] = [stats['num_of_images'].sum(), stats['num_of_individuals'].sum()]
species = 'melon_headed_whale'
individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
stats = pd.DataFrame(columns=['num_of_images'], index=individual_ids)
for individual_id in individual_ids:
    metadata_individual = metadata[species].query('individual_id == @individual_id').reset_index(drop=True)
    num_images = len(metadata_individual)
    stats.loc[individual_id] = [num_images]
stats.loc['total'] = [stats['num_of_images'].sum()]
# path_to_test_metadata is assumed to be defined in an earlier notebook cell.
path_to_metadata = '%s' % path_to_test_metadata
path_to_dir_images = '%s/test_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
num_images = 100
for species in all_species:
    print('Images for %s :' % species)
    whale_and_dolphin.showImagesTile(metadata=metadata[species][:num_images], num_cols=4)
    print()
code
88086475/cell_37
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd

# Defines a class that loads the metadata and images and processes them.
class WhaleAndDolphin():

    def __init__(self, path_to_metadata, path_to_dir_images):
        self._path_to_metadata = path_to_metadata
        self._path_to_dir_images = path_to_dir_images
        self._metadata = pd.read_csv(path_to_metadata)

    def getAllSpecies(self):
        return self._metadata["species"].unique()

    def sliceMetadata(self, query):
        return self._metadata.query(query).reset_index(drop=True)

    def getAllIndividualIDs(self, metadata):
        return metadata["individual_id"].unique()

    def showImagesTile(self, metadata, num_cols=3):
        num_rows = len(metadata) // num_cols + 1
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))
        for row in metadata.itertuples():
            title, image = self.getImage(row)
            ax = fig.add_subplot(num_rows, num_cols, row.Index + 1)
            ax.set_title(title)
            plt.imshow(image)
        plt.show()
        plt.clf()
        plt.close()

    def getImage(self, metadata_row):
        title = "%s (%s)" % (metadata_row.individual_id,
                             metadata_row.species)
        path_to_image = self._pathToImage(
            self._path_to_dir_images,
            file_name=metadata_row.image
        )
        image = Image.open(path_to_image)
        return title, image

    def _pathToImage(self, path_to_dir_images, file_name):
        return "%s/%s" % (path_to_dir_images, file_name)

    def showIndividualImagesTile(self, metadata, num_cols=3, num_individuals=10):
        metadata_sorted = \
            metadata.sort_values(by=["individual_id"]).reset_index(drop=True)

        num_rows = num_cols * num_individuals
        fig = plt.figure(figsize=(6.4 * num_cols, 4.8 * num_rows))

        individual_id_prev = ""
        i_row, i_col = 0, 0
        for row in metadata_sorted.itertuples():
            if row.individual_id != individual_id_prev:
                # Moves to the next grid row when a new individual is found.
                i_row += 1
                i_col = 1
                i = (i_row - 1) * num_cols + i_col
                individual_id_prev = row.individual_id
                if i_row > num_individuals:
                    break
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            elif i_col < num_cols:
                # Moves to the next column while fewer than num_cols images of
                # the same individual have been shown.
                i_col += 1
                i = (i_row - 1) * num_cols + i_col
                # Shows the image.
                title, image = self.getImage(row)
                ax = fig.add_subplot(num_rows, num_cols, i)
                ax.set_title(title)
                plt.imshow(image)
            else:
                # Skips any further images of the same individual.
                continue
        plt.show()
        plt.clf()
        plt.close()

path_to_metadata = '%s/train.csv' % path_to_inputs
path_to_dir_images = '%s/train_images' % path_to_inputs
whale_and_dolphin = WhaleAndDolphin(path_to_metadata=path_to_metadata, path_to_dir_images=path_to_dir_images)
all_species = whale_and_dolphin.getAllSpecies()
metadata = {}
stats = pd.DataFrame(columns=['num_of_images', 'num_of_individuals'], index=all_species)
for species in all_species:
    metadata[species] = whale_and_dolphin.sliceMetadata(query='species == @species')
    num_images = len(metadata[species])
    individual_ids = whale_and_dolphin.getAllIndividualIDs(metadata[species])
    num_individuals = len(individual_ids)
    stats.loc[species] = [num_images, num_individuals]
stats.loc['total'] = [stats['num_of_images'].sum(), stats['num_of_individuals'].sum()]
print('Number of images/individuals for each species:')
stats
code
17130551/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from ast import literal_eval as make_tuple

df = pd.read_csv('../input/ccle.txt/CCLE.txt', index_col=0)
# The column labels are stored as stringified tuples; parse them back.
cols = df.columns.tolist()
new_cols = [make_tuple(x) for x in cols]
df.columns = new_cols
df.shape
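# Hedged sketch, not in the original cell: the parsed tuples could also be
# kept as a proper pandas MultiIndex instead of plain tuple labels.
cols_mi = pd.MultiIndex.from_tuples(new_cols)
print(cols_mi[:3])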
code
17130551/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
print(os.listdir('../input'))
code
17130551/cell_8
[ "text_plain_output_1.png" ]
from clustergrammer2 import net import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/ccle.txt/CCLE.txt', index_col=0) from ast import literal_eval as make_tuple cols = df.columns.tolist() new_cols = [make_tuple(x) for x in cols] df.columns = new_cols df.shape net.load_df(df.round(2)) net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var') net.widget()
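# Hedged sketch (an assumption about clustergrammer2 internals, not its
# documented behavior): filter_N_top with rank_type='var' should correspond to
# keeping the 1000 highest-variance rows, computed here directly with pandas.
top_var_rows = df.var(axis=1).nlargest(1000).index
print(len(top_var_rows))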
code
17130551/cell_10
[ "text_plain_output_1.png" ]
from clustergrammer2 import net import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/ccle.txt/CCLE.txt', index_col=0) from ast import literal_eval as make_tuple cols = df.columns.tolist() new_cols = [make_tuple(x) for x in cols] df.columns = new_cols df.shape net.load_df(df.round(2)) net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var') net.widget() net.load_df(df) net.filter_N_top(inst_rc='row', N_top=1000, rank_type='var') net.normalize(axis='row', norm_type='zscore') df = net.export_df().round(2) net.load_df(df) net.widget()
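# Hedged check (an assumption about the zscore formula): after row-wise
# z-scoring, each row should have mean ~0 and standard deviation ~1.
print(df.mean(axis=1).round(2).head())
print(df.std(axis=1).round(2).head())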
code
17130551/cell_5
[ "text_plain_output_1.png" ]
from clustergrammer2 import net

show_widget = False
if not show_widget:
    print('\n-----------------------------------------------------')
    print('>>> <<<')
    print('>>> Please set show_widget to True to see widgets <<<')
    print('>>> <<<')
    print('-----------------------------------------------------\n')
    delattr(net, 'widget_class')
code
1005893/cell_6
[ "text_plain_output_1.png" ]
from keras.utils.np_utils import to_categorical
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tflearn
df_trn = pd.read_csv('../input/train.csv')
df_tst = pd.read_csv('../input/test.csv')
x_trn = df_trn.iloc[:, 1:].values  # pixel columns
y_trn = df_trn.iloc[:, 0].values   # label column
y_trn_cat = to_categorical(y_trn)
tf.reset_default_graph()
net = tflearn.input_data([None, 784])
net = tflearn.fully_connected(net, 256, activation='ReLU')
net = tflearn.fully_connected(net, 128, activation='ReLU')
net = tflearn.fully_connected(net, 64, activation='ReLU')
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
model = tflearn.DNN(net)
model.fit(x_trn, y_trn_cat, validation_set=0, show_metric=True, batch_size=1000, n_epoch=100)
np.argmax(model.predict(df_tst.values), 1)[0:100]
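# Hedged sketch, not in the original cell: scaling pixel values to [0, 1]
# usually stabilizes SGD training on image data; the fit above could use
# these scaled arrays instead of the raw 0-255 values.
x_trn_scaled = x_trn.astype('float32') / 255.0
x_tst_scaled = df_tst.values.astype('float32') / 255.0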
code