path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
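Each row below is one extracted notebook cell. As a minimal sketch of how such a table might be consumed (the file name `cells.jsonl` and the JSON-lines format are assumptions, not part of this dump):

```python
import pandas as pd

# Hypothetical JSON-lines export of the table below; the path and format are assumed.
cells = pd.read_json("cells.jsonl", lines=True)

# Keep only cells whose execution produced at least one image output.
with_images = cells[cells["screenshot_names"].map(len) > 0]
print(with_images[["path", "cell_type"]].head())
```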
128032771/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
sns.displot(data=train, x='Weight', hue='Gender', kde=True) | code |
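The `128032771` cells reference a `train` DataFrame loaded in a cell outside this extract. A minimal synthetic stand-in (column names are taken from the cells; the values and distributions are invented) that lets the boxplot and displot cells run:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
n = 500
# Synthetic stand-in for the missing training data; values are made up.
train = pd.DataFrame({
    "Gender": rng.choice(["male", "female"], n),
    "Age": rng.integers(18, 70, n),
    "Height": rng.normal(170, 10, n).round(1),
    "Weight": rng.normal(75, 12, n).round(1),
    "Duration": rng.integers(1, 30, n),
    "Heart_Rate": rng.integers(70, 130, n),
    "Body_Temp": rng.normal(40, 0.7, n).round(1),
    "Calories_Burned": rng.integers(1, 300, n),
})
```

With this stub defined first, the plotting cells in this group execute as written.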
128032771/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
sns.displot(data=train, x='Height', hue='Gender', kde=True) | code |
128032771/cell_3 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import warnings
import pandas as pd
import numpy as np
import random
import os
import gc
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore') | code |
128032771/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
sns.displot(data=train, x='Age', kde=True) | code |
128032771/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
plt.plot(train['Age'], train['Calories_Burned'], 'g*')
plt.title('Age vs Calories Burned')
plt.xlabel('Age')
plt.ylabel('Calories Burned')
plt.show()
plt.plot(train['Height'], train['Calories_Burned'], 'g*')
plt.title('Height vs Calories Burned')
plt.xlabel('Height')
plt.ylabel('Calories Burned')
plt.show()
plt.plot(train['Weight'], train['Calories_Burned'], 'g*')
plt.title('Weight vs Calories Burned')
plt.xlabel('Weight')
plt.ylabel('Calories Burned')
plt.show() | code |
128032771/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
sns.displot(data=train, x='Calories_Burned', kde=True) | code |
128032771/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
seed_everything(42)
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
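# note: mask selects the upper triangle for a masked heatmap, but it is not passed to sns.heatmap below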
plt.figure(figsize=(10, 10))
sns.heatmap(train.corr(), annot=True, cmap='YlOrRd')
plt.show() | code |
128032771/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2, 3, figsize=(10, 10))
sns.boxplot(y=train['Age'], ax=axes[0][0])
sns.boxplot(y=train['Height'], ax=axes[0][1])
sns.boxplot(y=train['Weight'], ax=axes[0][2])
sns.boxplot(y=train['Duration'], ax=axes[1][0])
sns.boxplot(y=train['Heart_Rate'], ax=axes[1][1])
sns.boxplot(y=train['Body_Temp'], ax=axes[1][2])
plt.tight_layout()
plt.show() | code |
72118116/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
df['type'].unique() | code |
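`df.fillna(df.mean())` in the cell above relies on older pandas silently skipping the non-numeric `type` column; pandas 2.x raises a TypeError instead. A version-safe variant of the imputation step:

```python
import pandas as pd

df = pd.read_csv("/kaggle/input/wine-quality/winequalityN.csv")
# Average only the numeric columns, leaving the string 'type' column alone.
df.fillna(df.mean(numeric_only=True), inplace=True)
```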
72118116/cell_29 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
df['type'] = pd.get_dummies(df['type'], drop_first=True)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
lr = lr.score(X_test, y_test)
from sklearn.svm import SVC
svm = SVC()
svm.fit(X_train, y_train)
svm = svm.score(X_test, y_test)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train, y_train)
rf = rf.score(X_test, y_test)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
knn = knn.score(X_test, y_test)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
dt = dt.score(X_test, y_test)
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X_train, y_train)
nb = nb.score(X_test, y_test)
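# note: LinearRegression.score returns R^2 while the classifier .score methods return accuracy, so these values are not directly comparable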
models = pd.DataFrame({'Model': ['Linear Regression', 'KNN', 'SVM', 'Random Forest', 'Naive Bayes', 'Decision Tree'], 'Score': [lr, knn, svm, rf, nb, dt]})
models.sort_values(by='Score', ascending=False)
plt.figure(figsize=(10, 5))
sns.barplot(x='Model', y='Score', data=models)
plt.show() | code |
72118116/cell_19 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
df['type'] = pd.get_dummies(df['type'], drop_first=True)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df.drop('quality', axis=1)) | code |
72118116/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72118116/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape | code |
72118116/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
df['type'] = pd.get_dummies(df['type'], drop_first=True)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
lr = lr.score(X_test, y_test)
from sklearn.svm import SVC
svm = SVC()
svm.fit(X_train, y_train)
svm = svm.score(X_test, y_test)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train, y_train)
rf = rf.score(X_test, y_test)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
knn = knn.score(X_test, y_test)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
dt = dt.score(X_test, y_test)
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X_train, y_train)
nb = nb.score(X_test, y_test)
models = pd.DataFrame({'Model': ['Linear Regression', 'KNN', 'SVM', 'Random Forest', 'Naive Bayes', 'Decision Tree'], 'Score': [lr, knn, svm, rf, nb, dt]})
models.sort_values(by='Score', ascending=False) | code |
72118116/cell_8 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
df.describe() | code |
72118116/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
plt.figure(figsize=(10, 5))
sns.heatmap(df.corr(), cmap='coolwarm')
plt.show() | code |
72118116/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
plt.figure(figsize=(5, 3))
sns.countplot(x='quality', data=df) | code |
72118116/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
df['type'] = pd.get_dummies(df['type'], drop_first=True)
df.head() | code |
72118116/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
df.fillna(df.mean(), inplace=True)
plt.figure(figsize=(10, 7))
sns.countplot(x='type', data=df, palette='hls')
plt.show() | code |
72118116/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.shape
missing_val_count_by_column = df.isnull().sum()
print(missing_val_count_by_column[missing_val_count_by_column > 0]) | code |
72118116/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality/winequalityN.csv')
df.head() | code |
18115505/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts()
train_set[['Pclass', 'Survived']].groupby(['Pclass']).mean().sort_values(by='Survived', ascending=False) | code |
18115505/cell_9 | [
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | 342 * 100 / 891 | code |
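(342 of the 891 passengers in the Titanic training set survived, so this expression gives the survival rate as a percentage.)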
18115505/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any() | code |
18115505/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.head() | code |
18115505/cell_11 | [
"text_plain_output_1.png"
] | 549 * 100 / 891 | code |
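(549 of the 891 passengers did not survive; this is the complementary percentage.)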
18115505/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts() | code |
18115505/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts()
train_set[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False) | code |
18115505/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts()
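# caution: passing index=train_set.Age reindexes Survived by the age values themselves rather than plotting ages, so the histogram is unlikely to show what was intended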
pd.DataFrame(train_set['Survived'], index=train_set.Age).plot(kind='hist') | code |
18115505/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.info() | code |
18115505/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts()
train_set[['Sex', 'Survived']].groupby(['Sex']).mean().plot(kind='bar')
train_set[['Pclass', 'Survived']].groupby(['Pclass']).mean().sort_values(by='Survived', ascending=False).plot(kind='bar')
train_set[['SibSp', 'Survived']].groupby(['SibSp']).mean().sort_values(by='Survived', ascending=False).plot(kind='bar')
plt.ylabel('Survived') | code |
18115505/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts()
train_set[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False) | code |
18115505/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
train_set.Survived.value_counts()
train_set[['Sex', 'Survived']].groupby(['Sex']).mean() | code |
18115505/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().any()
print('Total null entries:\n')
print('Age :%d\nCabin:%d\nEmbarked:%d' % (train_set.Age.isnull().sum(), train_set.Cabin.isnull().sum(), train_set.Embarked.isnull().sum())) | code |
327240/cell_21 | [
"image_output_1.png"
] | import pandas as ps
import pylab
import string
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
fileR['Date'] = ps.to_datetime(fileR['Date'])
fileR['year'] = fileR['Date'].dt.year
fileR['month'] = fileR['Date'].dt.month
fileR['day'] = fileR['Date'].dt.day
sub_years = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
years_legend = list(string.ascii_letters[:len(sub_years)])
fileR['year_group'] = ''
for i in range(0, len(sub_years) - 1):
fileR.loc[(sub_years[i + 1] > fileR['year']) & (fileR['year'] >= sub_years[i]), ['year_group']] = years_legend[i]
subfile = fileR[['Aboard', 'Fatalities', 'year']].groupby('year').sum()
subfile['survived'] = subfile['Aboard'] - subfile['Fatalities']
pylab.plot(subfile['Aboard'], label='Aboard')
pylab.plot(subfile['Fatalities'], label='Fatalities')
pylab.plot(subfile['survived'], label='Survived')
pylab.legend(loc='upper left') | code |
327240/cell_25 | [
"text_html_output_1.png"
] | import pandas as ps
import string
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
fileR['Date'] = ps.to_datetime(fileR['Date'])
fileR['year'] = fileR['Date'].dt.year
fileR['month'] = fileR['Date'].dt.month
fileR['day'] = fileR['Date'].dt.day
sub_years = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
years_legend = list(string.ascii_letters[:len(sub_years)])
fileR['year_group'] = ''
for i in range(0, len(sub_years) - 1):
fileR.loc[(sub_years[i + 1] > fileR['year']) & (fileR['year'] >= sub_years[i]), ['year_group']] = years_legend[i]
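# 'countries' is assumed to have been derived from the Location column in an earlier cell that is not part of this extract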
countrySub = fileR.groupby('countries').sum()
dangerousCountries = countrySub.sort_values('Fatalities', ascending=False)
dangerousCountries['Fatalities'][:20].plot(kind='bar', color='g', fontsize=14, title='Highest fatalities based on the location') | code |
327240/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as ps
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
print(fileR.head()) | code |
327240/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib
import pandas as ps
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
matplotlib.rcParams['figure.figsize'] = (10, 5)
ops = fileR['Operator'].value_counts()[:20]
ops.plot(kind='bar', legend='Operator', color='g', fontsize=10, title='Operators with Highest Crashes') | code |
327240/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as ps
import string
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
fileR['Date'] = ps.to_datetime(fileR['Date'])
fileR['year'] = fileR['Date'].dt.year
fileR['month'] = fileR['Date'].dt.month
fileR['day'] = fileR['Date'].dt.day
sub_years = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
years_legend = list(string.ascii_letters[:len(sub_years)])
fileR['year_group'] = ''
for i in range(0, len(sub_years) - 1):
fileR.loc[(sub_years[i + 1] > fileR['year']) & (fileR['year'] >= sub_years[i]), ['year_group']] = years_legend[i]
subfile2 = fileR[['Aboard', 'Fatalities', 'year', 'Operator', 'Type']].groupby('Operator').sum()
subfile2['survived'] = subfile2['Aboard'] - subfile2['Fatalities']
subfile2['percentageSurvived'] = subfile2['survived'] / subfile2['Aboard']
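# 'year' was summed per operator above, so a sum greater than the latest single year means the operator has more than one recorded crash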
subfile3 = subfile2[subfile2['year'] > max(fileR['year'])]
highSurvive = subfile3.sort_values(by='percentageSurvived', ascending=False)[:20]
highSurvive
highSurvive['percentageSurvived'].plot(kind='bar', color='g', fontsize=14, title='Operators with high percentage of survivers') | code |
327240/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as ps
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
types = fileR['Type'].value_counts()[:20]
types.plot(kind='bar', legend='Types', color='g', fontsize=10, title='Types with Highest Crashes') | code |
327240/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as ps
import string
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
fileR['Date'] = ps.to_datetime(fileR['Date'])
fileR['year'] = fileR['Date'].dt.year
fileR['month'] = fileR['Date'].dt.month
fileR['day'] = fileR['Date'].dt.day
sub_years = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
years_legend = list(string.ascii_letters[:len(sub_years)])
fileR['year_group'] = ''
for i in range(0, len(sub_years) - 1):
fileR.loc[(sub_years[i + 1] > fileR['year']) & (fileR['year'] >= sub_years[i]), ['year_group']] = years_legend[i]
subfile2 = fileR[['Aboard', 'Fatalities', 'year', 'Operator', 'Type']].groupby('Operator').sum()
subfile2['survived'] = subfile2['Aboard'] - subfile2['Fatalities']
subfile2['percentageSurvived'] = subfile2['survived'] / subfile2['Aboard']
subfile3 = subfile2[subfile2['year'] > max(fileR['year'])]
highSurvive = subfile3.sort_values(by='percentageSurvived', ascending=False)[:20]
highSurvive | code |
327240/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib
import pandas as ps
import string
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
matplotlib.rcParams['figure.figsize'] = (10, 5)
ops = fileR['Operator'].value_counts()[:20]
fileR['Date'] = ps.to_datetime(fileR['Date'])
fileR['year'] = fileR['Date'].dt.year
fileR['month'] = fileR['Date'].dt.month
fileR['day'] = fileR['Date'].dt.day
sub_years = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
years_legend = list(string.ascii_letters[:len(sub_years)])
fileR['year_group'] = ''
for i in range(0, len(sub_years) - 1):
fileR.loc[(sub_years[i + 1] > fileR['year']) & (fileR['year'] >= sub_years[i]), ['year_group']] = years_legend[i]
matplotlib.rcParams['figure.figsize'] = (10, 5)
fileR[['Fatalities', 'year_group']].groupby('year_group').count().plot(kind='bar', fontsize=14, legend=True, color='g', title='Fatalities based on decades') | code |
327240/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as ps
import string
fileR = ps.read_csv('../input/3-Airplane_Crashes_Since_1908.txt', sep=',')
fileR['Date'] = ps.to_datetime(fileR['Date'])
fileR['year'] = fileR['Date'].dt.year
fileR['month'] = fileR['Date'].dt.month
fileR['day'] = fileR['Date'].dt.day
sub_years = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
years_legend = list(string.ascii_letters[:len(sub_years)])
fileR['year_group'] = ''
for i in range(0, len(sub_years) - 1):
fileR.loc[(sub_years[i + 1] > fileR['year']) & (fileR['year'] >= sub_years[i]), ['year_group']] = years_legend[i]
labels = ['1900-1910', '1910-1920', '1920-1930', '1930-1940', '1940-1950', '1950-1960', '1960-1970', '1970-1980', '1980-1990', '1990-2000', '2000-2010']
sizes = fileR[['Fatalities', 'year_group']].groupby('year_group').sum()
explode = (0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0)
colors = cm.Set1(np.arange(20) / 30.0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=45)
plt.axis('equal')
plt.show() | code |
2037064/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/all_energy_statistics.csv')
df.columns = ['country', 'commodity', 'year', 'unit', 'quantity', 'footnotes', 'category']
df_solar = df[df.commodity.str.contains('Electricity - total net installed capacity of electric power plants, solar')]
df_max = df_solar.groupby(pd.Grouper(key='country'))['quantity'].max()
df_max = df_max.sort_values(ascending=False)
df_max = df_max[:6]
df_max.index.values
commodity_string = 'Electricity - total net installed capacity of electric power plants, solar'
df_max = df[df.commodity.str.contains(commodity_string)].groupby(pd.Grouper(key='country'))['quantity'].max().sort_values(ascending=False)[:6]
years = np.arange(2000, 2015)
dict_major = {}
for c in df_max.index.values:
    read_index = df_solar[df_solar.commodity.str.contains(commodity_string) & df_solar.country.str.contains(c + '$')].year
    read_data = df_solar[df_solar.commodity.str.contains(commodity_string) & df_solar.country.str.contains(c + '$')].quantity
    read_data.index = read_index
    prod = read_data.reindex(index=years, fill_value=0)
    dict_major.update({c: prod.values})
df_major = pd.DataFrame(dict_major)
df_major.index = years
df_major
ax = df_major.plot(kind='bar', x=df_major.index, stacked=False, figsize=(15, 9))
plt.title('Solar energy production')
plt.xlabel('Year')
plt.ylabel('Megawatts')
ax.yaxis.grid(False, 'minor')
ax.yaxis.grid(True, 'major') | code |
2037064/cell_7 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/all_energy_statistics.csv')
df.columns = ['country', 'commodity', 'year', 'unit', 'quantity', 'footnotes', 'category']
df_solar = df[df.commodity.str.contains('Electricity - total net installed capacity of electric power plants, solar')]
df_max = df_solar.groupby(pd.Grouper(key='country'))['quantity'].max()
df_max = df_max.sort_values(ascending=False)
df_max = df_max[:6]
df_max.index.values
commodity_string = 'Electricity - total net installed capacity of electric power plants, solar'
df_max = df[df.commodity.str.contains(commodity_string)].groupby(pd.Grouper(key='country'))['quantity'].max().sort_values(ascending=False)[:6]
years = np.arange(2000, 2015)
dict_major = {}
for c in df_max.index.values:
    read_index = df_solar[df_solar.commodity.str.contains(commodity_string) & df_solar.country.str.contains(c + '$')].year
    read_data = df_solar[df_solar.commodity.str.contains(commodity_string) & df_solar.country.str.contains(c + '$')].quantity
    read_data.index = read_index
    prod = read_data.reindex(index=years, fill_value=0)
    dict_major.update({c: prod.values})
df_major = pd.DataFrame(dict_major)
df_major.index = years
df_major | code |
2037064/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/all_energy_statistics.csv')
df.columns = ['country', 'commodity', 'year', 'unit', 'quantity', 'footnotes', 'category']
df_solar = df[df.commodity.str.contains('Electricity - total net installed capacity of electric power plants, solar')]
df_max = df_solar.groupby(pd.Grouper(key='country'))['quantity'].max()
df_max = df_max.sort_values(ascending=False)
df_max = df_max[:6]
df_max.index.values | code |
33102430/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.info() | code |
33102430/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.head() | code |
33102430/cell_2 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import seaborn as sns
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33102430/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.describe() | code |
33102430/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns | code |
50219234/cell_42 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import seaborn as sns
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
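# note: this bare plt.colorbar() is residue from the scatter-map cell; without an active mappable it raises a RuntimeError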
plt.colorbar()
california_df.isnull().sum()
# Our goal here is to extract the numeric columns so we can boxplot them in order to detect outliers.
# Numeric column extraction
numeric_columns = california_df.select_dtypes(include = ['float64', 'int']).columns
len_numeric_columns = len(numeric_columns)
# Boxplotting
fig = plt.figure(figsize = (15,10))
# Set number of columns you want to plot
n_cols = 3
n_plot_rows = len_numeric_columns//n_cols
n_plot_rows
for i, column in enumerate(numeric_columns):
ax = fig.add_subplot(n_plot_rows, n_cols, i+1)
sns.boxplot(y = california_df[column], orient = 'h', ax = ax)
fig.tight_layout()
X = california.data
y = california.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
fig = plt.figure(figsize = (15,10))
print("Cross validation results:")
cv_results = pd.DataFrame(neigh_CV.cv_results_)
accs = pd.DataFrame(columns=["Neighbors"])
# Display the results
melted_accs = accs.assign(**{'Neighbors': pd.DataFrame(neigh_CV.cv_results_['params']).unstack().values,
                             "Training R2": cv_results.mean_train_R2,
                             "Validation R2": cv_results.mean_test_R2,
                             "Training MSE": cv_results.mean_train_MSE,
                             "Validation MSE": cv_results.mean_test_MSE}) \
                  .melt('Neighbors', value_vars = ['Training MSE', 'Validation MSE'], var_name="Type", value_name="MSE")
g = sns.lineplot(x="Neighbors", y="MSE", hue='Type', data=melted_accs)
from sklearn.ensemble import RandomForestRegressor
nfold = 5
param_grid = [{'n_estimators': [3, 10, 30, 100, 150], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10, 30, 100], 'max_features': [2, 3, 4]}]
grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = grid_search.predict(X_test_rs)
y_est_train = grid_search.predict(X_train_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = grid_search.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, y_est_train)
R2_coeff_train = grid_search.score(X_train_rs, y_train)
RF_best_params = grid_search.best_params_
df_metrics = pd.concat( [pd.DataFrame(grid_search.cv_results_['params']),
pd.DataFrame({'train_MSE': grid_search.cv_results_['mean_train_MSE']}),
pd.DataFrame({'test_MSE': grid_search.cv_results_['mean_test_MSE']})
], axis = 1
)
m = ((df_metrics.max_features == 3) & (df_metrics.bootstrap == False))
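# m is constructed but never applied; filtering to those runs would be df_metrics.loc[m, ['n_estimators', 'test_MSE']]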
df_metrics.loc[:, ['n_estimators', 'test_MSE']]
#df_metrics = df_metrics.melt('n_estimators', value_vars = ['train_MSE', 'test_MSE'], var_name = 'Type')
fig = plt.figure(figsize = (15,10))
g = sns.lineplot(x="n_estimators", y="test_MSE", data=df_metrics, err_style = None)
# We use this function made by Sklearn (https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html)
# Graphics "Scalabilty of the model" and "Performance of the model" have been removed.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate the test and training learning curve plot (the fit-times and
performance plots from the original scikit-learn example are omitted).
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
axes : array of 3 axes, optional (default=None)
Axes to use for plotting the curves.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
if axes is None:
_, axes = plt.subplots(1, 1, figsize=(15, 10))
axes.set_title(title)
if ylim is not None:
axes.set_ylim(*ylim)
axes.set_xlabel("Training examples")
axes.set_ylabel("Score")
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
# Plot learning curve
axes.grid()
axes.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes.legend(loc="best")
return plt
title = 'Learning Curves Random Forest'
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = grid_search.best_estimator_
plot_learning_curve(estimator, title, X_train_rs, y_train, cv=cv, n_jobs=4)
plt.show() | code |
50219234/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
plt.figure(figsize=(15, 10))
plt.scatter(california_df['Longitude'], california_df['Latitude'], c=california_df['price'], s=california_df['Population'] / 10, cmap='viridis')
plt.colorbar()
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.title('house price on basis of geo-coordinates')
plt.show() | code |
50219234/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | print(california.DESCR) | code |
50219234/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
california_df.head(3) | code |
50219234/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
plt.colorbar()
california_df.isnull().sum()
# Our goal here is to extract the numeric columns so we can boxplot them in order to detect outliers.
# Numeric column extraction
numeric_columns = california_df.select_dtypes(include = ['float64', 'int']).columns
len_numeric_columns = len(numeric_columns)
# Boxplotting
fig = plt.figure(figsize = (15,10))
# Set number of columns you want to plot
n_cols = 3
n_plot_rows = len_numeric_columns//n_cols
n_plot_rows
for i, column in enumerate(numeric_columns):
ax = fig.add_subplot(n_plot_rows, n_cols, i+1)
sns.boxplot(y = california_df[column], orient = 'h', ax = ax)
fig.tight_layout()
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
fig = plt.figure(figsize = (15,10))
print("Cross validation results:")
cv_results = pd.DataFrame(neigh_CV.cv_results_)
accs = pd.DataFrame(columns=["Neighbors"])
# Mostramos los resultados
melted_accs = accs.assign(**{'Neighbors': pd.DataFrame(neigh_CV.cv_results_['params']).unstack().values,
"Training R2": cv_results.mean_train_R2,
"Validation R2": cv_results.mean_test_R2,
"Traning MSE": cv_results.mean_train_MSE,
"Validation MSE": cv_results.mean_test_MSE}) \
.melt('Neighbors', value_vars = ['Traning MSE', 'Validation MSE'], var_name="Type", value_name="MSE")
g = sns.lineplot(x="Neighbors", y="MSE", hue='Type', data=melted_accs)
from sklearn.ensemble import RandomForestRegressor
nfold = 5
param_grid = [{'n_estimators': [3, 10, 30, 100, 150], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10, 30, 100], 'max_features': [2, 3, 4]}]
grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = grid_search.predict(X_test_rs)
y_est_train = grid_search.predict(X_train_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = grid_search.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, y_est_train)
R2_coeff_train = grid_search.score(X_train_rs, y_train)
RF_best_params = grid_search.best_params_
df_metrics = pd.concat([pd.DataFrame(grid_search.cv_results_['params']), pd.DataFrame({'train_MSE': grid_search.cv_results_['mean_train_MSE']}), pd.DataFrame({'test_MSE': grid_search.cv_results_['mean_test_MSE']})], axis=1)
m = (df_metrics.max_features == 3) & (df_metrics.bootstrap == False)
df_metrics.loc[:, ['n_estimators', 'test_MSE']]
fig = plt.figure(figsize=(15, 10))
g = sns.lineplot(x='n_estimators', y='test_MSE', data=df_metrics, err_style=None) | code |
50219234/cell_48 | [
"image_output_1.png"
] | from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor
import numpy as np
import numpy as np
import pandas as pd
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
X = california.data
y = california.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from xgboost import XGBRegressor
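# note: the 'silent' argument is deprecated in newer XGBoost releases in favor of 'verbosity'; it is kept here as written in the original cell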
xg_regressor = XGBRegressor(max_depth=6, n_estimators=500, learning_rate=0.01, silent=True)
xg_regressor.fit(X_train_rs, y_train)
print('Feature importance:')
for name, score in zip(california['feature_names'], xg_regressor.feature_importances_):
print(name, '{0:.2f} %'.format(score * 100))
print('\n' * 2, 'Scoring:')
print('MSE for test {0:.2f}'.format(mean_squared_error(y_test, xg_regressor.predict(X_test_rs))))
print('R-squared for test {0:.2f}'.format(xg_regressor.score(X_test_rs, y_test)))
print('MSE for train {0:.2f}'.format(mean_squared_error(y_train, xg_regressor.predict(X_train_rs))))
print('R-squared for train {0:.2f}'.format(xg_regressor.score(X_train_rs, y_train))) | code |
50219234/cell_19 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
plt.colorbar()
california_df.isnull().sum()
numeric_columns = california_df.select_dtypes(include=['float64', 'int']).columns
len_numeric_columns = len(numeric_columns)
fig = plt.figure(figsize=(15, 10))
n_cols = 3
n_plot_rows = len_numeric_columns // n_cols
n_plot_rows
for i, column in enumerate(numeric_columns):
ax = fig.add_subplot(n_plot_rows, n_cols, i + 1)
sns.boxplot(y=california_df[column], orient='h', ax=ax)
fig.tight_layout() | code |
50219234/cell_45 | [
"image_output_1.png"
] | from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import numpy as np
import pandas as pd
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
from sklearn.ensemble import RandomForestRegressor
nfold = 5
param_grid = [{'n_estimators': [3, 10, 30, 100, 150], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10, 30, 100], 'max_features': [2, 3, 4]}]
grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = grid_search.predict(X_test_rs)
y_est_train = grid_search.predict(X_train_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = grid_search.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, y_est_train)
R2_coeff_train = grid_search.score(X_train_rs, y_train)
RF_best_params = grid_search.best_params_
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
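# with no max_depth, the DecisionTreeRegressor base estimator can fit the training data almost perfectly, so this AdaBoost setup is prone to overfitting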
ada_reg = AdaBoostRegressor(DecisionTreeRegressor(), n_estimators=200, learning_rate=0.5).fit(X_train_rs, y_train)
y_est = ada_reg.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = ada_reg.score(X_test_rs, y_test)
print('MSE : ' + str(MSE_tst))
print('R2 score: ' + str(R2_coeff)) | code |
50219234/cell_32 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
plt.colorbar()
california_df.isnull().sum()
# Our goal here is to extract the numeric columns so we can boxplot them in order to detect outliers.
# Numeric column extraction
numeric_columns = california_df.select_dtypes(include = ['float64', 'int']).columns
len_numeric_columns = len(numeric_columns)
# Boxplotting
fig = plt.figure(figsize = (15,10))
# Set number of columns you want to plot
n_cols = 3
n_plot_rows = len_numeric_columns//n_cols
n_plot_rows
for i, column in enumerate(numeric_columns):
ax = fig.add_subplot(n_plot_rows, n_cols, i+1)
sns.boxplot(y = california_df[column], orient = 'h', ax = ax)
fig.tight_layout()
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
fig = plt.figure(figsize=(15, 10))
print('Cross validation results:')
cv_results = pd.DataFrame(neigh_CV.cv_results_)
accs = pd.DataFrame(columns=['Neighbors'])
melted_accs = accs.assign(**{'Neighbors': pd.DataFrame(neigh_CV.cv_results_['params']).unstack().values, 'Training R2': cv_results.mean_train_R2, 'Validation R2': cv_results.mean_test_R2, 'Traning MSE': cv_results.mean_train_MSE, 'Validation MSE': cv_results.mean_test_MSE}).melt('Neighbors', value_vars=['Traning MSE', 'Validation MSE'], var_name='Type', value_name='MSE')
g = sns.lineplot(x='Neighbors', y='MSE', hue='Type', data=melted_accs) | code |
50219234/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
plt.colorbar()
california_df.isnull().sum() | code |
50219234/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
print('MSE : ' + str(MSE_tst))
print('R2 score: ' + str(R2_coeff))
print('Selected value of k: ' + str(K_CV)) | code |
50219234/cell_53 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import seaborn as sns
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
plt.colorbar()
california_df.isnull().sum()
# Our goal here is to extract the numeric columns so we can boxplot them in order to detect outliers.
# Numeric column extraction
numeric_columns = california_df.select_dtypes(include = ['float64', 'int']).columns
len_numeric_columns = len(numeric_columns)
# Boxplotting
fig = plt.figure(figsize = (15,10))
# Set number of columns you want to plot
n_cols = 3
n_plot_rows = len_numeric_columns//n_cols
n_plot_rows
for i, column in enumerate(numeric_columns):
ax = fig.add_subplot(n_plot_rows, n_cols, i+1)
sns.boxplot(y = california_df[column], orient = 'h', ax = ax)
fig.tight_layout()
X = california.data
y = california.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
fig = plt.figure(figsize = (15,10))
print("Cross validation results:")
cv_results = pd.DataFrame(neigh_CV.cv_results_)
accs = pd.DataFrame(columns=["Neighbors"])
# Display the results
melted_accs = accs.assign(**{'Neighbors': pd.DataFrame(neigh_CV.cv_results_['params']).unstack().values,
                             "Training R2": cv_results.mean_train_R2,
                             "Validation R2": cv_results.mean_test_R2,
                             "Training MSE": cv_results.mean_train_MSE,
                             "Validation MSE": cv_results.mean_test_MSE}) \
                 .melt('Neighbors', value_vars = ['Training MSE', 'Validation MSE'], var_name="Type", value_name="MSE")
g = sns.lineplot(x="Neighbors", y="MSE", hue='Type', data=melted_accs)
from sklearn.ensemble import RandomForestRegressor
nfold = 5
param_grid = [{'n_estimators': [3, 10, 30, 100, 150], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10, 30, 100], 'max_features': [2, 3, 4]}]
grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = grid_search.predict(X_test_rs)
y_est_train = grid_search.predict(X_train_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = grid_search.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, y_est_train)
R2_coeff_train = grid_search.score(X_train_rs, y_train)
RF_best_params = grid_search.best_params_
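# Added sketch: with refit='R2' the search refits the best configuration on the
# full training set, so the tuned forest can be reused directly.
best_rf = grid_search.best_estimator_
print('Tuned forest:', best_rf)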
df_metrics = pd.concat( [pd.DataFrame(grid_search.cv_results_['params']),
pd.DataFrame({'train_MSE': grid_search.cv_results_['mean_train_MSE']}),
pd.DataFrame({'test_MSE': grid_search.cv_results_['mean_test_MSE']})
], axis = 1
)
m = ((df_metrics.max_features == 3) & (df_metrics.bootstrap == False))
df_metrics = df_metrics.loc[m, ['n_estimators', 'test_MSE']]  # apply the mask (the source computed m but never used it)
#df_metrics = df_metrics.melt('n_estimators', value_vars = ['train_MSE', 'test_MSE'], var_name = 'Type')
fig = plt.figure(figsize = (15,10))
g = sns.lineplot(x="n_estimators", y="test_MSE", data=df_metrics, err_style = None)
# We use this helper adapted from the sklearn docs (https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html).
# The "Scalability of the model" and "Performance of the model" graphics have been removed.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate the test and training learning curve plot (the fit-time plots from
    the original sklearn example have been removed).
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
    axes : matplotlib axes, optional (default=None)
        Axes to use for plotting the curve.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
if axes is None:
_, axes = plt.subplots(1, 1, figsize=(15, 10))
axes.set_title(title)
if ylim is not None:
axes.set_ylim(*ylim)
axes.set_xlabel("Training examples")
axes.set_ylabel("Score")
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
# Plot learning curve
axes.grid()
axes.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes.legend(loc="best")
return plt
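# Added usage sketch for the helper above (the source defines it but never calls it;
# the estimator and cv settings here are illustrative assumptions).
plot_learning_curve(RandomForestRegressor(n_estimators=30, n_jobs=-1),
                    'Random forest learning curve', X_train_rs, y_train, cv=3, n_jobs=-1)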
params = {'n_estimators': [100, 150, 200, 300, 350], 'learning_rate': np.linspace(0.1, 1.0, 10), 'min_child_weight': [1, 5, 10], 'gamma': [0, 0.5, 1, 1.5, 2, 5], 'subsample': [0.6, 0.8, 1.0], 'colsample_bytree': [0.6, 0.8, 1.0], 'max_depth': np.arange(3, 11, 1)}
from sklearn.model_selection import RandomizedSearchCV
xgb_reg = XGBRegressor(silent=True)
xgb_random = RandomizedSearchCV(estimator=xgb_reg, param_distributions=params, n_iter=200, cv=5, verbose=0, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
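# Added evaluation sketch (an assumption, mirroring how the earlier models are reported).
y_est = xgb_random.predict(X_test_rs)
print('XGB best params:', xgb_random.best_params_)
print('XGB test MSE : ' + str(mean_squared_error(y_test, y_est)))
print('XGB test R2  : ' + str(xgb_random.score(X_test_rs, y_test))) | code |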
50219234/cell_27 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.preprocessing import RobustScaler
# Added (assumption): this cell depends on a train/test split made earlier in the source notebook.
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
california = fetch_california_housing()
X_train, X_test, y_train, y_test = train_test_split(california.data, california.target, test_size=0.3)
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
print('TESTING METRICS')
print('Metrics without scaling:')
print('MSE : ' + str(MSE_tst))
print('R2 score: ' + str(R2_coeff))
print('\nMetrics with RobustScaler:')
print('MSE : ' + str(MSE_tst_rs))
print('R2 score: ' + str(R2_coeff_rs))
print('\nTRAINING METRICS')
print('Metrics without scaling:')
print('MSE : ' + str(MSE_train))
print('R2 score: ' + str(R2_train_coeff)) | code |
50219234/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error, r2_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
california = fetch_california_housing()  # added (assumption): loaded in an earlier cell of the source notebook
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
X_train, X_test, y_train, y_test = train_test_split(california.data, california.target, test_size=0.3)  # added (assumption): split made earlier in the notebook
rs = RobustScaler()
X_train_rs = rs.fit_transform(X_train)
X_test_rs = rs.transform(X_test)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lg = LinearRegression()
lg.fit(X_train, y_train)
lg_rs = LinearRegression()
lg_rs.fit(X_train_rs, y_train)
y_est = lg.predict(X_test)
y_est_rs = lg_rs.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = lg.score(X_test, y_test)
MSE_tst_rs = mean_squared_error(y_test, y_est_rs)
R2_coeff_rs = lg_rs.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, lg.predict(X_train))
R2_train_coeff = lg.score(X_train, y_train)
from sklearn.neighbors import KNeighborsRegressor
k_max = 20
rang_K = np.arange(1, k_max + 1)
tuned_parameters = [{'n_neighbors': rang_K}]
nfold = 5
neigh_CV = GridSearchCV(KNeighborsRegressor(), tuned_parameters, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = neigh_CV.predict(X_test_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = neigh_CV.score(X_test_rs, y_test)
K_CV = neigh_CV.best_params_['n_neighbors']
from sklearn.ensemble import RandomForestRegressor
nfold = 5
param_grid = [{'n_estimators': [3, 10, 30, 100, 150], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10, 30, 100], 'max_features': [2, 3, 4]}]
grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=nfold, scoring={'MSE': make_scorer(mean_squared_error), 'R2': make_scorer(r2_score)}, return_train_score=True, refit='R2', n_jobs=-1).fit(X_train_rs, y_train)
y_est = grid_search.predict(X_test_rs)
y_est_train = grid_search.predict(X_train_rs)
MSE_tst = mean_squared_error(y_test, y_est)
R2_coeff = grid_search.score(X_test_rs, y_test)
MSE_train = mean_squared_error(y_train, y_est_train)
R2_coeff_train = grid_search.score(X_train_rs, y_train)
RF_best_params = grid_search.best_params_
print('MSE test : ' + str(MSE_tst))
print('R2 test score: ' + str(R2_coeff))
print('MSE train : ' + str(MSE_train))
print('R2 train score: ' + str(R2_coeff_train))
print('Selected value of best params: ' + str(RF_best_params)) | code |
50219234/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import fetch_california_housing
california = fetch_california_housing()  # added (assumption): loaded in an earlier cell of the source notebook
concat_california_array = np.concatenate((california.data, np.reshape(california.target, (california.target.shape[0], 1))), axis=1)
california_df = pd.DataFrame(concat_california_array, columns=california.feature_names + ['price'])
# plt.colorbar()  # residue from an earlier plot; calling it with no mappable would raise an error
plt.figure(figsize=(11, 7))
sns.heatmap(cbar=False, annot=True, data=california_df.corr() * 100, cmap='coolwarm')
plt.title('% Corelation Matrix')
plt.show() | code |
50231668/cell_4 | [
"text_plain_output_1.png"
] | def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
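# Added sanity check: binary search assumes a sorted input; start/end indices are inclusive.
assert binary_search_recursive([2, 4, 6, 8], 6, 0, 3) == 2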
element = 35
array = list(range(1, 1000))
n = 1000
print('Searching for {}'.format(element))
print('Index of {}: {}'.format(element, binary_search_recursive(array, element, 0, len(array) - 1)))  # end index is len - 1
50231668/cell_6 | [
"text_plain_output_1.png"
] | def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
def linearsearch(arr, x):
for i in range(len(arr)):
if arr[i] == x:
return i
return -1
arr = ['10', '20', '30', '40', '50', '60', '70']
x = '50'
def binarySearch(arr, left, right, x):
if right >= left:
mid = left + (right - left) // 2
if arr[mid] == x:
return mid
elif arr[mid] > x:
return binarySearch(arr, left, mid - 1, x)
else:
return binarySearch(arr, mid + 1, right, x)
else:
return -1
arr = [10, 20, 30, 40, 50, 60, 70]
x = 50
result = binarySearch(arr, 0, len(arr) - 1, x)
if result != -1:
    print('Element is present at index %d' % result)
else:
print('Element is not present in array') | code |
50231668/cell_2 | [
"text_plain_output_1.png"
for num in range(1, 1001):
    if num > 1:  # 1 is not prime
        for i in range(2, num):  # the source's range(1000, num) is empty, so every number printed as "prime"
            if num % i == 0:
                break
        else:
            print(num)
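# Added sketch: a faster variant only tests divisors up to sqrt(num) (O(n*sqrt(n)) overall).
import math
primes = [n for n in range(2, 1001) if all(n % d for d in range(2, math.isqrt(n) + 1))]
print(len(primes), 'primes found below 1001') | code |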
50231668/cell_7 | [
"text_plain_output_1.png"
] | def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
element = 35
array = list(range(1, 1000))
n = 1000
def insertionSort(array):
for step in range(1, len(array)):
key = array[step]
j = step - 1
while j >= 0 and key < array[j]:
array[j + 1] = array[j]
j = j - 1
array[j + 1] = key
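# Added check: insertion sort is O(n^2) in the worst case but close to O(n) on
# nearly-sorted input; a tiny illustrative assertion:
nearly_sorted = [1, 2, 3, 5, 4]
insertionSort(nearly_sorted)
assert nearly_sorted == [1, 2, 3, 4, 5]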
data = [10, 5, 30, 15, 50, 6, 25]
insertionSort(data)
print('Sorted Array in Ascending Order:')
print(data) | code |
50231668/cell_8 | [
"text_plain_output_1.png"
] | def binary_search_recursive(array, element, start, end):
if start > end:
return -1
mid = (start + end) // 2
if element == array[mid]:
return mid
if element < array[mid]:
return binary_search_recursive(array, element, start, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, end)
element = 35
array = list(range(1, 1000))
n = 1000
def insertionSort(array):
for step in range(1, len(array)):
key = array[step]
j = step - 1
while j >= 0 and key < array[j]:
array[j + 1] = array[j]
j = j - 1
array[j + 1] = key
data = [10, 5, 30, 15, 50, 6, 25]
insertionSort(data)
def selectionSort(array, size):
for step in range(size):
min_idx = step
for i in range(step + 1, size):
if array[i] < array[min_idx]:
min_idx = i
array[step], array[min_idx] = (array[min_idx], array[step])
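# Added check: selection sort always scans the remaining suffix, so it stays O(n^2)
# even on sorted input; a tiny illustrative assertion:
sample = [3, 1, 2]
selectionSort(sample, len(sample))
assert sample == [1, 2, 3]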
data = [10, 5, 30, 15, 50, 6, 25]
size = len(data)
selectionSort(data, size)
print('Sorted Array in Ascending Order:')
print(data) | code |
50231668/cell_5 | [
"text_plain_output_1.png"
] | def linearsearch(arr, x):
for i in range(len(arr)):
if arr[i] == x:
return i
return -1
arr = ['10', '20', '30', '40', '50', '60', '70']
x = '50'
print('Element found at index ' + str(linearsearch(arr, x)))
2041736/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
btc['ohlc_average'] = (btc['open'] + btc['high'] + btc['low'] + btc['close']) / 4 | code |
2041736/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
btc.tail() | code |
2041736/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
df.tail() | code |
2041736/cell_23 | [
"text_html_output_1.png"
] | from datetime import datetime, timedelta
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
sns.set()
sns.set_style('whitegrid')
from sklearn import preprocessing
btc['Price_After_Month'] = btc['close'].shift(-30)  # added (assumption): target column created in an earlier cell
btc.dropna(inplace=True)
X = btc.drop('Price_After_Month', axis=1)
X = preprocessing.scale(X)
y = btc['Price_After_Month']
# Added (assumption): reconstruct the train/test split from the earlier cell so this cell runs standalone.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor(n_estimators=200, random_state=101)
reg.fit(X_train, y_train)
accuracy = reg.score(X_test, y_test)
accuracy = accuracy * 100
accuracy = float('{0:.4f}'.format(accuracy))
preds = reg.predict(X_test)
X_30 = X[-30:]
forecast = reg.predict(X_30)
from datetime import datetime, timedelta
last_date = btc.iloc[-1].name
modified_date = last_date + timedelta(days=1)
date = pd.date_range(modified_date, periods=30, freq='D')
df1 = pd.DataFrame(forecast, columns=['Forecast'], index=date)
btc = pd.concat([btc, df1])  # DataFrame.append was removed in pandas 2.0
btc['close'].plot(figsize=(12, 6), label='Close')
btc['Forecast'].plot(label='forecast')
plt.legend() | code |
2041736/cell_20 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
# Assumes X_train/X_test/y_train/y_test from the earlier train/test split cell.
reg = RandomForestRegressor(n_estimators=200, random_state=101)
reg.fit(X_train, y_train)
accuracy = reg.score(X_test, y_test)
accuracy = accuracy * 100
accuracy = float('{0:.4f}'.format(accuracy))
preds = reg.predict(X_test)
print('The prediction is:', preds[1], 'But the real value is:', y_test.iloc[1])  # .iloc avoids label-based lookup on the shuffled index
2041736/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True) | code |
2041736/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
sns.set()
sns.set_style('whitegrid')
btc['close'].plot(figsize=(12, 6), label='Close')
btc['close'].rolling(window=30).mean().plot(label='30 Day Avg')
plt.legend() | code |
2041736/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
# Assumes X_train/X_test/y_train/y_test from the earlier train/test split cell.
reg = RandomForestRegressor(n_estimators=200, random_state=101)
reg.fit(X_train, y_train)
accuracy = reg.score(X_test, y_test)
accuracy = accuracy * 100
accuracy = float('{0:.4f}'.format(accuracy))
print('Accuracy is:', accuracy, '%') | code |
2041736/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any() | code |
2041736/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
from sklearn import preprocessing
btc['Price_After_Month'] = btc['close'].shift(-30)  # added (assumption): target column created in an earlier cell
btc.dropna(inplace=True)
X = btc.drop('Price_After_Month', axis=1)
X = preprocessing.scale(X)
y = btc['Price_After_Month']
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
2041736/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape | code |
2041736/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
btc['Price_After_Month'] = btc['close'].shift(-30) | code |
2041736/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
btc.tail() | code |
2041736/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
df.head() | code |
2041736/cell_17 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
from sklearn import preprocessing
btc['Price_After_Month'] = btc['close'].shift(-30)  # added (assumption): target column created in an earlier cell
btc.dropna(inplace=True)
X = btc.drop('Price_After_Month', axis=1)
X = preprocessing.scale(X)
y = btc['Price_After_Month'] | code |
2041736/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
btc.head() | code |
2041736/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from datetime import datetime, timedelta
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
df = pd.read_csv('../input/crypto-markets.csv', parse_dates=['date'], index_col='date')
btc = df[df['symbol'] == 'BTC']
btc.drop(['volume', 'symbol', 'name', 'ranknow', 'market'], axis=1, inplace=True)
btc.isnull().any()
btc.shape
from sklearn import preprocessing
btc['Price_After_Month'] = btc['close'].shift(-30)  # added (assumption): target column created in an earlier cell
btc.dropna(inplace=True)
X = btc.drop('Price_After_Month', axis=1)
X = preprocessing.scale(X)
y = btc['Price_After_Month']
# Added (assumption): reconstruct the train/test split from the earlier cell so this cell runs standalone.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor(n_estimators=200, random_state=101)
reg.fit(X_train, y_train)
accuracy = reg.score(X_test, y_test)
accuracy = accuracy * 100
accuracy = float('{0:.4f}'.format(accuracy))
preds = reg.predict(X_test)
X_30 = X[-30:]
forecast = reg.predict(X_30)
from datetime import datetime, timedelta
last_date = btc.iloc[-1].name
modified_date = last_date + timedelta(days=1)
date = pd.date_range(modified_date, periods=30, freq='D')
df1 = pd.DataFrame(forecast, columns=['Forecast'], index=date)
btc = pd.concat([btc, df1])  # DataFrame.append was removed in pandas 2.0
btc.tail() | code |
17118879/cell_21 | [
"text_plain_output_1.png"
] | import pathlib
import random
import tensorflow as tf
train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)
img = tf.read_file(all_paths[0])  # read a single file; tf.read_file does not accept a list of paths
img
def preprocess_image(image):
img_tensor = tf.image.decode_png(image, channels=3)
img_tensor = tf.cast(img_tensor, tf.float32)
img_tensor /= 255.0
return img_tensor
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
# Added (assumption): define the tensor the prints refer to; the 'Resized' label comes
# from the source notebook, which presumably resized the image at this point.
img_tensor = load_and_preprocess_image(all_paths[0])
print('Resized', img_tensor.shape)
print(img_tensor.dtype) | code |
17118879/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/train.csv')  # note: the source reads train.csv here; sample_submission.csv was likely intended
train_df.info() | code |
17118879/cell_23 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/train.csv')
train_df[train_df.id_code == '5d024177e214']
classes_dist = pd.DataFrame(train_df['diagnosis'].value_counts()/train_df.shape[0]).reset_index()
# barplot
ax = sns.barplot(x="index", y="diagnosis", data=classes_dist)
# Imbalanced dataset: 49% no DR vs. 8% proliferative (i.e. the most severe DR).
# Model building will need oversampling for the minority classes.
train_df.columns
train_df['image_path'] = '../input/train_images/' + train_df['id_code']
train_df.head(3) | code |
17118879/cell_20 | [
"text_plain_output_1.png"
] | import pathlib
import random
import tensorflow as tf
train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)
img = tf.read_file(all_paths[0])  # read a single file; tf.read_file does not accept a list of paths
img | code |
17118879/cell_29 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pathlib
import random
import seaborn as sns
import tensorflow as tf
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/train.csv')
train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
train_df[train_df.id_code == '5d024177e214']
classes_dist = pd.DataFrame(train_df['diagnosis'].value_counts()/train_df.shape[0]).reset_index()
# barplot
ax = sns.barplot(x="index", y="diagnosis", data=classes_dist)
# Imbalanced dataset: 49% no DR vs. 8% proliferative (i.e. the most severe DR).
# Model building will need oversampling for the minority classes.
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)
img = tf.read_file(all_paths[0])  # read a single file; tf.read_file does not accept a list of paths
img
def preprocess_image(image):
img_tensor = tf.image.decode_png(image, channels=3)
img_tensor = tf.cast(img_tensor, tf.float32)
img_tensor /= 255.0
return img_tensor
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
train_df.columns
train_df['image_path'] = '../input/train_images/' + train_df['id_code']
np.array(train_df['diagnosis'])
labels = tf.convert_to_tensor(np.array(train_df['diagnosis']), dtype=tf.int32)
paths = tf.convert_to_tensor(np.array(train_df['image_path']), dtype=tf.string)
image, label = tf.train.slice_input_producer([paths, labels], shuffle=True)
path_ds = tf.data.Dataset.from_tensor_slices(train_df['image_path'])
AUTOTUNE = tf.data.experimental.AUTOTUNE
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
image_ds.take(1)
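# Added sketch (assumption about the intended next step): pair images with labels,
# then shuffle/batch/prefetch for training.
label_ds = tf.data.Dataset.from_tensor_slices(labels)
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
train_ds = image_label_ds.shuffle(buffer_size=1024).batch(32).prefetch(AUTOTUNE)
print(train_ds)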
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 8))
for n, image in enumerate(image_ds.take(4)):
print(image.shape)
plt.subplot(2, 2, n + 1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
    # plt.xlabel(caption_image(all_image_paths[n]))  # caption_image from the source notebook is not defined here
    plt.xlabel(train_df['image_path'].iloc[n].split('/')[-1])  # added: label with the file name instead
plt.show() | code |
17118879/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pathlib
import random
import seaborn as sns
import tensorflow as tf
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/train.csv')
train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
train_df[train_df.id_code == '5d024177e214']
classes_dist = pd.DataFrame(train_df['diagnosis'].value_counts()/train_df.shape[0]).reset_index()
# barplot
ax = sns.barplot(x="index", y="diagnosis", data=classes_dist)
# Imbalanced dataset: 49% no DR vs. 8% proliferative (i.e. the most severe DR).
# Model building will need oversampling for the minority classes.
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)
img = tf.read_file(all_paths[0])  # read a single file; tf.read_file does not accept a list of paths
img
def preprocess_image(image):
img_tensor = tf.image.decode_png(image, channels=3)
img_tensor = tf.cast(img_tensor, tf.float32)
img_tensor /= 255.0
return img_tensor
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
train_df.columns
train_df['image_path'] = '../input/train_images/' + train_df['id_code']
np.array(train_df['diagnosis'])
labels = tf.convert_to_tensor(np.array(train_df['diagnosis']), dtype=tf.int32)
paths = tf.convert_to_tensor(np.array(train_df['image_path']), dtype=tf.string)
image, label = tf.train.slice_input_producer([paths, labels], shuffle=True)
path_ds = tf.data.Dataset.from_tensor_slices(train_df['image_path'])
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds) | code |
17118879/cell_2 | [
"image_output_1.png"
import numpy as np
import pandas as pd
import os
os.getcwd() | code |
17118879/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/train.csv')
train_df[train_df.id_code == '5d024177e214']
classes_dist = pd.DataFrame(train_df['diagnosis'].value_counts() / train_df.shape[0]).reset_index()
ax = sns.barplot(x='index', y='diagnosis', data=classes_dist) | code |
17118879/cell_19 | [
"text_html_output_1.png"
] | from IPython.core.display import Image
from IPython.display import display
import pathlib
import random
train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)
for n in range(3):
image_path = random.choice(all_paths)
print(image_path)
display(Image(image_path, width=300, height=300)) | code |
17118879/cell_1 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
17118879/cell_7 | [
"text_plain_output_1.png"
] | train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
print(train_images_path) | code |