path (string, 13–17 chars) | screenshot_names (list, 1–873 items) | code (string, 0–40.4k chars) | cell_type (1 class) |
---|---|---|---|
128001996/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
target = 'yield'
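# Stack the competition train/test files with the external dataset;
# 'test' flags test-set rows, 'gen' separates the competition files (1) from the external data (0).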
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df['id'] = df['id'].fillna(-1).astype(int)
df.reset_index(drop=True, inplace=True)
df.columns
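# Draw a histogram per numeric column, opening a fresh 4-axis figure whenever a row of axes fills up.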
num_columns = ['fruitset', 'fruitmass', 'seeds']
ncols = 4
for n, col in enumerate(num_columns):
    if n % ncols == 0:
        fig, axs = plt.subplots(ncols=ncols, figsize=(24, 6))
    ax = axs[n % ncols]
    sns.histplot(data=df[col], ax=ax) | code |
105196211/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df2.head() | code |
105196211/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2.head() | code |
105196211/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum() | code |
105196211/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('dark_background')
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 1)
plt.scatter(df.index, df.English)
plt.title('English')
plt.subplot(2, 2, 2)
plt.scatter(df.index, df.Logical)
plt.title('Logical')
plt.subplot(2, 2, 3)
plt.scatter(df.index, df.Quant)
plt.title('Quant')
plt.show() | code |
105196211/cell_4 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df | code |
105196211/cell_34 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('dark_background')
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
sns.countplot(x=df['Gender'], palette='inferno') | code |
105196211/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df3 = df2[df2['collegeGPA'] >= 40]
df3.shape | code |
105196211/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
spec = df2['Specialization'].value_counts(ascending=False)
spec
specless10 = spec[spec <= 10]
specless10
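# Collapse specializations that occur 10 times or fewer into a single 'other' bucket.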
def remove(x):
    if x in specless10:
        return 'other'
    else:
        return x
df2['Specialization'] = df2['Specialization'].apply(remove)
df2['Specialization'].unique() | code |
105196211/cell_26 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('dark_background')
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 1)
plt.scatter(df.index, df.Domain)
plt.title('Domain')
plt.subplot(2, 2, 2)
plt.scatter(df.index, df.ComputerProgramming)
plt.title('ComputerProgramming')
plt.subplot(2, 2, 3)
plt.scatter(df.index, df.ElectronicsAndSemicon)
plt.title('ElectronicsAndSemicon')
plt.show() | code |
105196211/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape | code |
105196211/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
spec = df2['Specialization'].value_counts(ascending=False)
spec
specless10 = spec[spec <= 10]
specless10 | code |
105196211/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
105196211/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.head(5) | code |
105196211/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
spec = df2['Specialization'].value_counts(ascending=False)
spec | code |
105196211/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('dark_background')
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
sns.scatterplot(x=df['10percentage'], y=df['12percentage']) | code |
105196211/cell_8 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns | code |
105196211/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df2.info() | code |
105196211/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df2['Specialization'].value_counts() | code |
105196211/cell_38 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('dark_background')
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df3 = df2[df2['collegeGPA'] >= 40]
df3.shape
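# -1 is used as a missing-value sentinel: convert it to NaN, then mean-impute each affected column.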
df3 = df3.replace(-1, np.nan)
column_with_nan = [column for column in df3.columns if df3.isnull().sum()[column] > 0]
for column in column_with_nan:
    df3[column] = df3[column].fillna(df3[column].mean())
df3.columns
df3.columns
sns.countplot(x=df3['Degree']) | code |
105196211/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df2['Degree'].unique() | code |
105196211/cell_35 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('dark_background')
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
sns.scatterplot(x=df['10percentage'], y=df['12percentage'], hue=df.CollegeTier) | code |
105196211/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df3 = df2[df2['collegeGPA'] >= 40]
df3.shape
df3 = df3.replace(-1, np.nan)
column_with_nan = [column for column in df3.columns if df3.isnull().sum()[column] > 0]
for column in column_with_nan:
    df3[column] = df3[column].fillna(df3[column].mean())
df3.columns | code |
105196211/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df2.head() | code |
105196211/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
sns.scatterplot(x=df2.index, y=df2['collegeGPA']) | code |
105196211/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.describe() | code |
105196211/cell_37 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns
df2 = df.drop(columns=['ID', 'DOB', '10board', '12graduation', '12board', 'CollegeState', 'CollegeID', 'CollegeCityTier', 'CollegeCityID', 'GraduationYear'])
df2 = df2.drop_duplicates()
df3 = df2[df2['collegeGPA'] >= 40]
df3.shape
df3 = df3.replace(-1, np.nan)
column_with_nan = [column for column in df3.columns if df3.isnull().sum()[column] > 0]
for column in column_with_nan:
    df3[column] = df3[column].fillna(df3[column].mean())
df3.columns
df3.columns | code |
105196211/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/engineering-graduate-salary-prediction/Engineering_graduate_salary.csv')
df
df.columns
df.isnull().sum()
df.shape
df.columns | code |
90104932/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
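# Strip the '%' and '£' symbols so these columns can be cast to floats.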
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr()
pol_hlc_data = cleaned_hlc_data.copy()
pol_hlc_data = pol_hlc_data[pol_hlc_data['Pollution(Index score) (City)'] != '-']
pol_hlc_data['Pollution(Index score) (City)'] = pol_hlc_data['Pollution(Index score) (City)'].astype(str).astype(float)
pol_hlc_data.corr()
plt.figure(figsize=(10, 5))
plt.xlabel('Happiness')
plt.ylabel('Annual avg. hours worked')
plt.title('Happiness and Hours Worked Viz')
sns.regplot(data=hours_hlc_data, x='Happiness levels(Country)', y='Annual avg. hours worked') | code |
90104932/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
print('Preview of the data: ')
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.head(5) | code |
90104932/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr()
pol_hlc_data = cleaned_hlc_data.copy()
pol_hlc_data = pol_hlc_data[pol_hlc_data['Pollution(Index score) (City)'] != '-']
pol_hlc_data['Pollution(Index score) (City)'] = pol_hlc_data['Pollution(Index score) (City)'].astype(str).astype(float)
pol_hlc_data.corr()
plt.figure(figsize=(10, 5))
plt.xlabel('Happiness')
plt.ylabel('Pollution(Index Score)')
plt.title('Happiness and Pollution Viz')
sns.regplot(data=pol_hlc_data, x='Happiness levels(Country)', y='Pollution(Index score) (City)') | code |
90104932/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
print('Description of the data: ')
hlc_data.describe() | code |
90104932/cell_11 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr() | code |
90104932/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr()
pol_hlc_data = cleaned_hlc_data.copy()
pol_hlc_data = pol_hlc_data[pol_hlc_data['Pollution(Index score) (City)'] != '-']
pol_hlc_data['Pollution(Index score) (City)'] = pol_hlc_data['Pollution(Index score) (City)'].astype(str).astype(float)
pol_hlc_data.corr()
plt.figure(figsize=(10, 5))
plt.xlabel('Happiness')
plt.ylabel('Annual avg. hours worked')
plt.title('Happiness and Hours Worked Viz')
sns.scatterplot(data=hours_hlc_data, x='Happiness levels(Country)', y='Annual avg. hours worked') | code |
90104932/cell_1 | [
"text_plain_output_1.png"
] | import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
pd.plotting.register_matplotlib_converters()
print('Setup Complete') | code |
90104932/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr() | code |
90104932/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr()
pol_hlc_data = cleaned_hlc_data.copy()
pol_hlc_data = pol_hlc_data[pol_hlc_data['Pollution(Index score) (City)'] != '-']
pol_hlc_data['Pollution(Index score) (City)'] = pol_hlc_data['Pollution(Index score) (City)'].astype(str).astype(float)
pol_hlc_data.corr()
plt.figure(figsize=(10, 5))
plt.xlabel('Happiness')
plt.ylabel('Life Expectancy')
plt.title('Happiness and Life Expactancy Viz')
sns.scatterplot(data=cleaned_hlc_data, x='Happiness levels(Country)', y='Life expectancy(years) (Country)') | code |
90104932/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr()
pol_hlc_data = cleaned_hlc_data.copy()
pol_hlc_data = pol_hlc_data[pol_hlc_data['Pollution(Index score) (City)'] != '-']
pol_hlc_data['Pollution(Index score) (City)'] = pol_hlc_data['Pollution(Index score) (City)'].astype(str).astype(float)
pol_hlc_data.corr()
plt.figure(figsize=(10, 5))
plt.xlabel('Happiness')
plt.ylabel('Life Expectancy')
plt.title('Happiness and Life Expactancy Viz')
sns.regplot(data=cleaned_hlc_data, x='Happiness levels(Country)', y='Life expectancy(years) (Country)') | code |
90104932/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr() | code |
90104932/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
hlc_data.columns
cleaned_hlc_data = hlc_data.copy()
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].str.replace('%', '', regex=False)
cleaned_hlc_data['Obesity levels(Country)'] = cleaned_hlc_data['Obesity levels(Country)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a bottle of water(City)'] = cleaned_hlc_data['Cost of a bottle of water(City)'].astype(str).astype(float)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].str.replace('£', '', regex=False)
cleaned_hlc_data['Cost of a monthly gym membership(City)'] = cleaned_hlc_data['Cost of a monthly gym membership(City)'].astype(str).astype(float)
cleaned_hlc_data.corr()
hours_hlc_data = cleaned_hlc_data.copy()
hours_hlc_data = hours_hlc_data[hours_hlc_data['Annual avg. hours worked'] != '-']
hours_hlc_data['Annual avg. hours worked'] = hours_hlc_data['Annual avg. hours worked'].astype(str).astype(int)
hours_hlc_data.corr()
sun_hlc_data = cleaned_hlc_data.copy()
sun_hlc_data = sun_hlc_data[sun_hlc_data['Sunshine hours(City)'] != '-']
sun_hlc_data['Sunshine hours(City)'] = sun_hlc_data['Sunshine hours(City)'].astype(str).astype(int)
sun_hlc_data.corr()
pol_hlc_data = cleaned_hlc_data.copy()
pol_hlc_data = pol_hlc_data[pol_hlc_data['Pollution(Index score) (City)'] != '-']
pol_hlc_data['Pollution(Index score) (City)'] = pol_hlc_data['Pollution(Index score) (City)'].astype(str).astype(float)
pol_hlc_data.corr() | code |
90104932/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
hlc_data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv')
print('City and Rank + The 10 metrics: ')
hlc_data.columns | code |
34129954/cell_4 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
TRAINING_DIR = '/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/'
VALIDATION_DIR = '/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/valid/'
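# Augment the training images on the fly; validation images are only rescaled.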
train_gen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=0, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
valid_gen = ImageDataGenerator(rescale=1.0 / 255)
train_data = train_gen.flow_from_directory(TRAINING_DIR, target_size=(227, 227), class_mode='categorical', color_mode='rgb', batch_size=64)
valid_data = valid_gen.flow_from_directory(VALIDATION_DIR, target_size=(227, 227), class_mode='categorical', color_mode='rgb')
for cl_indis, cl_name in enumerate(train_data.class_indices):
    print(cl_indis, cl_name) | code |
34129954/cell_6 | [
"text_plain_output_1.png"
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
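# AlexNet-style stack: five convolutional layers with three max-pooling stages,
# flattened into two 4096-unit dense layers and a 10-way softmax.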
model = Sequential()
model.add(Convolution2D(96, 11, strides=(4, 4), padding='valid', input_shape=(227, 227, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Convolution2D(256, 5, strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Convolution2D(384, 3, strides=(1, 1), padding='same', activation='relu'))
model.add(Convolution2D(384, 3, strides=(1, 1), padding='same', activation='relu'))
model.add(Convolution2D(256, 3, strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Flatten())
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.summary() | code |
34129954/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow as tf
import tensorflow as tf
import tensorflow as tf
tf.test.gpu_device_name()
import tensorflow as tf
import keras_preprocessing
from tensorflow.keras.preprocessing import image
import pickle
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
tf.__version__ | code |
34129954/cell_1 | [
"text_plain_output_1.png"
] | import tensorflow as tf
import tensorflow as tf
tf.test.gpu_device_name() | code |
34129954/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import keras
TRAINING_DIR = '/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/'
VALIDATION_DIR = '/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/valid/'
train_gen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=0, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
valid_gen = ImageDataGenerator(rescale=1.0 / 255)
train_data = train_gen.flow_from_directory(TRAINING_DIR, target_size=(227, 227), class_mode='categorical', color_mode='rgb', batch_size=64)
valid_data = valid_gen.flow_from_directory(VALIDATION_DIR, target_size=(227, 227), class_mode='categorical', color_mode='rgb')
model = Sequential()
model.add(Convolution2D(96, 11, strides=(4, 4), padding='valid', input_shape=(227, 227, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Convolution2D(256, 5, strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Convolution2D(384, 3, strides=(1, 1), padding='same', activation='relu'))
model.add(Convolution2D(384, 3, strides=(1, 1), padding='same', activation='relu'))
model.add(Convolution2D(256, 3, strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Flatten())
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.summary()
from tensorflow.keras.optimizers import Adam
opt = Adam(learning_rate=0.001)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
train_num = train_data.n
valid_num = valid_data.n
train_batch_size = train_data.batch_size
valid_batch_size = valid_data.batch_size
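# Steps per epoch = sample count // batch size, so each epoch covers each generator roughly once.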
STEP_SIZE_TRAIN = train_num // train_batch_size
STEP_SIZE_VALID = valid_num // valid_batch_size
history = model.fit(train_data, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_data, validation_steps=STEP_SIZE_VALID, epochs=25) | code |
34129954/cell_3 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
TRAINING_DIR = '/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/'
VALIDATION_DIR = '/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/valid/'
train_gen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=0, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
valid_gen = ImageDataGenerator(rescale=1.0 / 255)
train_data = train_gen.flow_from_directory(TRAINING_DIR, target_size=(227, 227), class_mode='categorical', color_mode='rgb', batch_size=64)
valid_data = valid_gen.flow_from_directory(VALIDATION_DIR, target_size=(227, 227), class_mode='categorical', color_mode='rgb') | code |
16150103/cell_11 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import pickle
import pickle
with open('../input/X.pickle', 'rb') as fp:
    X_feature = pickle.load(fp)
with open('../input/Y.pickle', 'rb') as fp:
    Y_label = pickle.load(fp)
X_feature = X_feature / 255.0
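# Three Conv2D(64)/ReLU/max-pool blocks, then a 64-unit dense layer and a 4-class softmax head.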
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X_feature.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(4))
model.add(Activation('softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x=X_feature, y=Y_label, batch_size=20, epochs=50, validation_split=0.1, shuffle=True)
model.save('image_classifier_002.model')
model.summary() | code |
16150103/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os
print(os.listdir('../input')) | code |
16150103/cell_8 | [
"text_plain_output_1.png"
] | from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import pickle
import pickle
with open('../input/X.pickle', 'rb') as fp:
    X_feature = pickle.load(fp)
with open('../input/Y.pickle', 'rb') as fp:
    Y_label = pickle.load(fp)
X_feature = X_feature / 255.0
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X_feature.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(4))
model.add(Activation('softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x=X_feature, y=Y_label, batch_size=20, epochs=50, validation_split=0.1, shuffle=True) | code |
16120502/cell_42 | [
"image_output_1.png"
] | from scipy import stats
from scipy.stats import norm, skew
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
all_nulls
plt.xticks(rotation='90')
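# Compare raw SalePrice with its log1p transform, which reduces the heavy right skew.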
saleprice_df = pd.concat([train.SalePrice, np.log(train.SalePrice + 1).rename('LogSalePrice')], axis=1, names=['SalePrice', 'LogSalePrice'])
train = train.drop(train[train.SalePrice > 450000].index)
sns.set_style("white")
sns.set_color_codes(palette='deep')
# Create figure space
fig, ax = plt.subplots(figsize=(18,5), ncols=2, nrows=1)
# Create a distribution plot
ax1 = sns.distplot(saleprice_df.SalePrice, kde=False, fit=norm, ax=ax[0])
ax2 = sns.distplot(saleprice_df.LogSalePrice, kde=False, fit=norm, ax=ax[1])
# Set plot features
ax1.set_title('SalePrice Distribution')
ax2.set_title('LogSalePrice Distribution')
mu, sigma = norm.fit(train['SalePrice'])
train['LogSalePrice'] = np.log1p(train.SalePrice)
stats.probplot(train['SalePrice'], plot=plt)
plt.show() | code |
16120502/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
test.head() | code |
16120502/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
all_nulls
plt.figure(figsize=(12, 8))
sns.barplot(x=all_nulls.index, y='Null Data Pct', data=all_nulls)
plt.xticks(rotation='90')
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of Missing Values', fontsize=15)
plt.title('Percent of Missing Data by Feature', fontsize=15) | code |
16120502/cell_34 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import norm, skew
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
all_nulls
plt.xticks(rotation='90')
saleprice_df = pd.concat([train.SalePrice, np.log(train.SalePrice + 1).rename('LogSalePrice')], axis=1, names=['SalePrice', 'LogSalePrice'])
train = train.drop(train[train.SalePrice > 450000].index)
sns.set_style("white")
sns.set_color_codes(palette='deep')
# Create figure space
fig, ax = plt.subplots(figsize=(18,5), ncols=2, nrows=1)
# Create a distribution plot
ax1 = sns.distplot(saleprice_df.SalePrice, kde=False, fit=norm, ax=ax[0])
ax2 = sns.distplot(saleprice_df.LogSalePrice, kde=False, fit=norm, ax=ax[1])
# Set plot features
ax1.set_title('SalePrice Distribution')
ax2.set_title('LogSalePrice Distribution')
plt.figure(figsize=(10, 5))
sns.distplot(train['SalePrice'], fit=norm)
mu, sigma = norm.fit(train['SalePrice'])
print('\n mu = {:.0f} and sigma = {:.0f}\n'.format(mu, sigma))
plt.legend(['Norm Dist. ($\\mu=$ {:.0f} and $\\sigma=$ {:.0f} )'.format(mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution') | code |
16120502/cell_23 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
print('There are', len(all_nulls), 'columns with missing values.')
all_nulls | code |
16120502/cell_29 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
all_nulls
saleprice_df = pd.concat([train.SalePrice, np.log(train.SalePrice + 1).rename('LogSalePrice')], axis=1, names=['SalePrice', 'LogSalePrice'])
saleprice_df.head() | code |
16120502/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv') | code |
16120502/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
print('*' * 40)
print('********** train shape: ' + str(train.shape) + '*' * 10)
print(train.info())
print('*' * 40)
print('********** test shape: ' + str(test.shape) + '*' * 10) | code |
16120502/cell_18 | [
"text_plain_output_1.png"
] | """
Some functions to start off with:
train.sample()
train.describe()
train.describe(include=['O'])
train.describe(include='all')
train.head()
train.tail()
train.value_counts().sum()
train.isnull().sum()
train.count()
train.fillna()
train.fillna(train[col].mode(), inplace=True)
train.mean()
train.median()
train.mode()
train.shape
train.info()
""" | code |
16120502/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import norm, skew
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
all_nulls
plt.xticks(rotation='90')
saleprice_df = pd.concat([train.SalePrice, np.log(train.SalePrice + 1).rename('LogSalePrice')], axis=1, names=['SalePrice', 'LogSalePrice'])
sns.set_style('white')
sns.set_color_codes(palette='deep')
fig, ax = plt.subplots(figsize=(18, 5), ncols=2, nrows=1)
ax1 = sns.distplot(saleprice_df.SalePrice, kde=False, fit=norm, ax=ax[0])
ax2 = sns.distplot(saleprice_df.LogSalePrice, kde=False, fit=norm, ax=ax[1])
ax1.set_title('SalePrice Distribution')
ax2.set_title('LogSalePrice Distribution') | code |
16120502/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3] | code |
16120502/cell_35 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train.iloc[0:5, :3]
null_cols = pd.DataFrame(train.isnull().sum().sort_values(ascending=False), columns=['Null Data Count'])
null_cols_pct = pd.DataFrame(round(train.isnull().sum().sort_values(ascending=False) / len(train), 2) * 100, columns=['Null Data Pct'])
null_cols_df = pd.DataFrame(pd.concat([null_cols, null_cols_pct], axis=1))
all_nulls = null_cols_df[null_cols_df['Null Data Pct'] > 0]
all_nulls
saleprice_df = pd.concat([train.SalePrice, np.log(train.SalePrice + 1).rename('LogSalePrice')], axis=1, names=['SalePrice', 'LogSalePrice'])
train = train.drop(train[train.SalePrice > 450000].index)
print('Skewness: %f' % train['SalePrice'].skew())
print('Kurtosis: %f' % train['SalePrice'].kurt()) | code |
16120502/cell_10 | [
"image_output_1.png"
] | import pandas_profiling
import numpy as np
import random as rand
import datetime as dt
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import norm, skew
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
from scipy import stats
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
import warnings
warnings.filterwarnings(action='ignore')
pd.options.display.max_seq_items = 5000
pd.options.display.max_rows = 5000
flatui = ['#9b59b6', '#3498db', '#95a5a6', '#e74c3c', '#34495e', '#2ecc71']
sns.set_palette(flatui)
sns.palplot(sns.color_palette(flatui)) | code |
16120502/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('09-house-train.csv')
test = pd.read_csv('09-house-test.csv')
train.head() | code |
105189792/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
pd.options.display.max_info_columns = 200
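# Keep rows with a valid day (1–31), month (1–12), and year (1998–2017), then assemble a datetime column.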
data.drop(index=data[(data['iday'] < 1) | (data['iday'] > 31) | (data['imonth'] < 1) | (data['imonth'] > 12) | (data['iyear'] < 1998) | (data['iyear'] > 2017)].index, inplace=True)
data['date'] = data['iyear'].map(str) + '/' + data['imonth'].map(str) + '/' + data['iday'].map(str)
data['date'] = pd.to_datetime(data['date'])
data = data.reset_index(drop=True)
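# Drop any column where more than a quarter of the values are missing.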
miss_list = data.columns[data.isnull().sum() > data.shape[0] * 1 / 4]
data.drop(miss_list, axis=1, inplace=True)
data_nf = data.copy()
for column in data.columns:
    if data.dtypes[column] == 'object':
        data[column] = pd.factorize(data[column])[0].astype(int)
pass_list = ['latitude', 'longitude', 'scite1', 'dbsource', 'summary', 'target1', 'natlty1_txt', 'country_txt', 'region_txt', 'attacktype1_txt', 'targtype1_txt', 'targsubtype1_txt', 'weaptype1_txt', 'weapsubtype1_txt']
data.drop(['iyear', 'imonth', 'iday'], axis=1, inplace=True)
data.drop(pass_list, axis=1, inplace=True)
data.info() | code |
105189792/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
pd.options.display.max_info_columns = 200
data.info() | code |
105189792/cell_6 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
pd.options.display.max_info_columns = 200
data.drop(index=data[(data['iday'] < 1) | (data['iday'] > 31) | (data['imonth'] < 1) | (data['imonth'] > 12) | (data['iyear'] < 1998) | (data['iyear'] > 2017)].index, inplace=True)
data['date'] = data['iyear'].map(str) + '/' + data['imonth'].map(str) + '/' + data['iday'].map(str)
data['date'] = pd.to_datetime(data['date'])
data = data.reset_index(drop=True)
miss_list = data.columns[data.isnull().sum() > data.shape[0] * 1 / 4]
print(miss_list)
data.drop(miss_list, axis=1, inplace=True)
data.info() | code |
105189792/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
pd.options.display.max_info_columns = 200
data.drop(index=data[(data['iday'] < 1) | (data['iday'] > 31) | (data['imonth'] < 1) | (data['imonth'] > 12) | (data['iyear'] < 1998) | (data['iyear'] > 2017)].index, inplace=True)
data['date'] = data['iyear'].map(str) + '/' + data['imonth'].map(str) + '/' + data['iday'].map(str)
data['date'] = pd.to_datetime(data['date'])
data = data.reset_index(drop=True)
miss_list = data.columns[data.isnull().sum() > data.shape[0] * 1 / 4]
data.drop(miss_list, axis=1, inplace=True)
data_nf = data.copy()
for column in data.columns:
if data.dtypes[column] == 'object':
data[column] = pd.factorize(data[column])[0].astype(int)
data.info() | code |
105189792/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
pd.options.display.max_info_columns = 200
data.drop(index=data[(data['iday'] < 1) | (data['iday'] > 31) | (data['imonth'] < 1) | (data['imonth'] > 12) | (data['iyear'] < 1998) | (data['iyear'] > 2017)].index, inplace=True)
data['date'] = data['iyear'].map(str) + '/' + data['imonth'].map(str) + '/' + data['iday'].map(str)
data['date'] = pd.to_datetime(data['date'])
data = data.reset_index(drop=True)
miss_list = data.columns[data.isnull().sum() > data.shape[0] * 1 / 4]
data.drop(miss_list, axis=1, inplace=True)
data_nf = data.copy()
for column in data.columns:
if data.dtypes[column] == 'object':
data[column] = pd.factorize(data[column])[0].astype(int)
data.iloc[:].hist(bins=100, figsize=(20, 74), layout=(29, 4)) | code |
105189792/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
print('Data size:', data.shape)
data.head(1) | code |
105189792/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/attackincident/1.csv', nrows=3000, skiprows=range(1, 74732))
pd.options.display.max_info_columns = 200
data.drop(index=data[(data['iday'] < 1) | (data['iday'] > 31) | (data['imonth'] < 1) | (data['imonth'] > 12) | (data['iyear'] < 1998) | (data['iyear'] > 2017)].index, inplace=True)
data['date'] = data['iyear'].map(str) + '/' + data['imonth'].map(str) + '/' + data['iday'].map(str)
data['date'] = pd.to_datetime(data['date'])
data = data.reset_index(drop=True)
miss_list = data.columns[data.isnull().sum() > data.shape[0] * 1 / 4]
data.drop(miss_list, axis=1, inplace=True)
data_nf = data.copy()
for column in data.columns:
if data.dtypes[column] == 'object':
data[column] = pd.factorize(data[column])[0].astype(int)
pass_list = ['latitude', 'longitude', 'scite1', 'dbsource', 'summary', 'target1', 'natlty1_txt', 'country_txt', 'region_txt', 'attacktype1_txt', 'targtype1_txt', 'targsubtype1_txt', 'weaptype1_txt', 'weapsubtype1_txt']
data.drop(['iyear', 'imonth', 'iday'], axis=1, inplace=True)
data.drop(pass_list, axis=1, inplace=True)
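# correlation heatmap of the cleaned, fully numeric dataframe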
plt.figure(figsize=(35, 35))
plt.title('correlation heatmap of df_data')
heatmap = sns.heatmap(data.copy().corr(), square=True, annot=True, fmt='.2f', linecolor='black')
heatmap.set_xticklabels(heatmap.get_xticklabels(), rotation=30)
heatmap.set_yticklabels(heatmap.get_yticklabels(), rotation=30)
plt.show()
data.drop(['INT_LOG'], axis=1, inplace=True)
data.info() | code |
2016018/cell_2 | [
"text_plain_output_1.png"
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
train = pd.read_table('../input/train.tsv')
drpNa = train.drop(columns=['train_id', 'name', 'category_name', 'brand_name', 'item_description'])
drpNa = drpNa.dropna()
def rmsle(h, y):
"""
    Compute the Root Mean Squared Log Error for hypothesis h and targets y
Args:
h - numpy array containing predictions with shape (n_samples, n_targets)
y - numpy array containing targets with shape (n_samples, n_targets)
"""
return np.sqrt(np.square(np.log(h + 1) - np.log(y + 1)).mean())
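# sanity check (hypothetical values): a perfect prediction yields an RMSLE of 0
# e.g. rmsle(np.array([5.0, 10.0]), np.array([5.0, 10.0])) -> 0.0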
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(drpNa[['item_condition_id', 'shipping']][:10000], drpNa['price'][:10000], test_size=0.4, random_state=0)
from sklearn.svm import SVR
clf = SVR(C=1.0, epsilon=0.2)
clf.fit(X_train, y_train)
pre = clf.predict(X_test)
rmsle(pre, y_test)
test = pd.read_table('../input/test.tsv')
TdrpNa = test.drop(columns=['test_id', 'name', 'category_name', 'brand_name', 'item_description'])
TdrpNa = TdrpNa.dropna()
trial_sub1 = clf.predict(TdrpNa)
submission = pd.read_csv('../input/sample_submission.csv')
submission['price'] = trial_sub1
submission.to_csv('fist_trial.csv', index=False)
len(submission) | code |
2016018/cell_1 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
train = pd.read_table('../input/train.tsv')
drpNa = train.drop(columns=['train_id', 'name', 'category_name', 'brand_name', 'item_description'])
drpNa = drpNa.dropna()
def rmsle(h, y):
"""
    Compute the Root Mean Squared Log Error for hypothesis h and targets y
Args:
h - numpy array containing predictions with shape (n_samples, n_targets)
y - numpy array containing targets with shape (n_samples, n_targets)
"""
return np.sqrt(np.square(np.log(h + 1) - np.log(y + 1)).mean())
print(train.head())
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(drpNa[['item_condition_id', 'shipping']][:10000], drpNa['price'][:10000], test_size=0.4, random_state=0)
from sklearn.svm import SVR
clf = SVR(C=1.0, epsilon=0.2)
clf.fit(X_train, y_train)
pre = clf.predict(X_test)
rmsle(pre, y_test)
test = pd.read_table('../input/test.tsv')
TdrpNa = test.drop(columns=['test_id', 'name', 'category_name', 'brand_name', 'item_description'])
TdrpNa = TdrpNa.dropna()
trial_sub1 = clf.predict(TdrpNa)
submission = pd.read_csv('../input/sample_submission.csv')
submission['price'] = trial_sub1
submission.to_csv('fist_trial.csv', index=False) | code |
128027453/cell_13 | [
"text_html_output_1.png"
] | from sklearn.utils import resample
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts()
def plot_histogram(dataset: pd.DataFrame):
unique_labels, counts = np.unique(dataset.diabetes, return_counts=True)
from sklearn.utils import resample
df_majority = df[df.diabetes == 0]
df_minority = df[df.diabetes == 1]
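# downsample the majority (non-diabetic) class to the minority count so both labels are balanced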
df_majority_downsampled = resample(df_majority, replace=False, n_samples=df.diabetes.value_counts()[1], random_state=1234)
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
plot_histogram(df_downsampled) | code |
128027453/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts() | code |
128027453/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.head() | code |
128027453/cell_20 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, recall_score
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=15, n_jobs=-1)
rnd_clf.fit(X_train, y_train)
rnd_clf_preds = rnd_clf.predict(X_test)
from xgboost import XGBClassifier
xgb_clf = XGBClassifier(early_stopping_rounds=3)
xgb_clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
xgb_clf_preds = xgb_clf.predict(X_test)
print('Accuracy of XGBoost on validation data : ', accuracy_score(y_test, xgb_clf_preds))
print('Recall of XGBoost on validation data : ', recall_score(y_test, xgb_clf_preds))
128027453/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
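# collapse smoking history to a rough ordinal score: never=0, current=1, everything else (incl. 'No Info')=0.5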
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
df.head(20) | code |
128027453/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts()
def plot_histogram(dataset: pd.DataFrame):
unique_labels, counts = np.unique(dataset.diabetes, return_counts=True)
plot_histogram(df) | code |
128027453/cell_19 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, recall_score
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=15, n_jobs=-1)
rnd_clf.fit(X_train, y_train)
rnd_clf_preds = rnd_clf.predict(X_test)
print('Accuracy of RandomForest on validation data : ', accuracy_score(y_test, rnd_clf_preds))
print('Recall of RandomForest on validation data : ', recall_score(y_test, rnd_clf_preds)) | code |
128027453/cell_1 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128027453/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm') | code |
128027453/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score, recall_score
from sklearn.svm import SVC
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, recall_score
svm_clf = SVC()
svm_clf.fit(X_train, y_train)
svm_clf_preds = svm_clf.predict(X_test)
print('SVM Classifier recall on validation data : ', recall_score(y_test, svm_clf_preds))
print('SVM Classifier accuracy on validation data : ', accuracy_score(y_test, svm_clf_preds)) | code |
128027453/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.head(10) | code |
128027453/cell_16 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
from sklearn.utils import resample
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts()
def plot_histogram(dataset: pd.DataFrame):
unique_labels, counts = np.unique(dataset.diabetes, return_counts=True)
from sklearn.utils import resample
df_majority = df[df.diabetes == 0]
df_minority = df[df.diabetes == 1]
df_majority_downsampled = resample(df_majority, replace=False, n_samples=df.diabetes.value_counts()[1], random_state=1234)
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
import pandas as pd
from sklearn import preprocessing
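# rescale every feature column to the [0, 1] range before modelling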
x_unscaled = df_downsampled.drop('diabetes', axis=1).values
min_max_scaler = preprocessing.MinMaxScaler()
X = min_max_scaler.fit_transform(x_unscaled)
example = pd.DataFrame(X)
example.head(10) | code |
128027453/cell_14 | [
"text_html_output_1.png"
] | from sklearn.utils import resample
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts()
def plot_histogram(dataset: pd.DataFrame):
unique_labels, counts = np.unique(dataset.diabetes, return_counts=True)
from sklearn.utils import resample
df_majority = df[df.diabetes == 0]
df_minority = df[df.diabetes == 1]
df_majority_downsampled = resample(df_majority, replace=False, n_samples=df.diabetes.value_counts()[1], random_state=1234)
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
df_downsampled.head(10) | code |
104115434/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
df = pd.read_csv('../input/mayo-clinic-strip-ai/train.csv')
df_subset = df[['patient_id']][:500]
pred = torch.tensor(np.linspace(0, 1, 500))
target = torch.ones(500, dtype=torch.float64)
df_subset['CE'], df_subset['LAA'] = [pred.numpy(), 1 - pred.numpy()]
df_subset
def HingeLoss(y_pred, y_true):
"""Average hinge loss (non-regularized)
Parameters:
----------
y_true: torch tensor of shape (n_samples,)
    True target, consisting of integers of two values. The positive label must
    be greater than the negative label
    y_pred: torch tensor of shape (n_samples,)
Prediction, as output by a decision function (floats)
Returns:
----------
list: tensor list of calculated loss
mean: mean loss of the batch
Hinge loss
"""
list_ = torch.Tensor([max(0, 1 - x * y) for x, y in zip(y_pred, y_true)])
return (list_, torch.mean(list_))
def BinaryCrossEntropy(y_pred, y_true):
"""Binary cross entropy loss
Parameters:
-----------
y_true: tensor of shape (n_samples,)
True target, consisting of integers of two values.
y_pred: tensor of shape (n_samples,)
Prediction, as output by a decision function (floats)
Returns:
-----------
loss: float
BCE loss
"""
term_0 = y_true * torch.log(y_pred + 1e-07)
term_1 = (1 - y_true) * torch.log(1 - y_pred + 1e-07)
return (-(term_0 + term_1), -torch.mean(term_0 + term_1, axis=0))
BCE_list, loss = BinaryCrossEntropy(torch.sigmoid(pred), target)
Hinge_list, hinge_mean = HingeLoss(pred, target)  # element-wise hinge losses for the plot below
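# pos_weight rescales the positive-class term inside BCEWithLogitsLoss; values below 1 down-weight positives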
pos_weight_1 = torch.tensor([0.5])
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight_1, reduction='none')
bce_weight_loss_1 = criterion(pred, target)
pos_weight_2 = torch.tensor([0.75])
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight_2, reduction='none')
bce_weight_loss_2 = criterion(pred, target)
plt.style.use('seaborn')
plt.figure(figsize=(8, 6))
plt.plot(pred.numpy(), bce_weight_loss_1.numpy(), 'r--', label='weight = 0.5', linewidth=2.0)
plt.plot(pred.numpy(), bce_weight_loss_2.numpy(), 'orange', label='weight = 0.75', linewidth=2.0)
plt.plot(pred.numpy(), Hinge_list.numpy(), color='teal', label='Hinge Loss', linewidth=2.0)
plt.plot(pred.numpy(), BCE_list.numpy(), color='cornflowerblue', label='Log Loss', linewidth=2.0)
plt.legend(loc='upper right')
plt.ylim(0, 0.8)
plt.xlim(0, 1.0)
plt.xlabel('$Y_{pred}$', fontsize=15)
plt.ylabel('$L(y=1, f(x))$', fontsize=15)
plt.title('Convex Loss Functions', fontsize=15)
plt.show() | code |
104115434/cell_9 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
import torch
df = pd.read_csv('../input/mayo-clinic-strip-ai/train.csv')
df_subset = df[['patient_id']][:500]
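# synthetic scores sweeping from 0 to 1, evaluated against an all-positive target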
pred = torch.tensor(np.linspace(0, 1, 500))
target = torch.ones(500, dtype=torch.float64)
df_subset['CE'], df_subset['LAA'] = [pred.numpy(), 1 - pred.numpy()]
df_subset | code |
104115434/cell_6 | [
"image_output_1.png"
] | from itables import init_notebook_mode
import seaborn as sns
from itables import init_notebook_mode
init_notebook_mode(all_interactive=True)
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from sklearn.metrics import hinge_loss
import random
import tensorflow as tf | code |
104115434/cell_5 | [
"image_output_1.png"
] | !pip install itables | code |
130023373/cell_9 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | import pandas as pd #dataframe manipulation
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
train.info() | code |
130023373/cell_4 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_9.png"
] | !pip install distance -q | code |
130023373/cell_34 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
from tqdm import tqdm_notebook
import cv2 #for read the image
import os
import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
img = Image.open(f'{os.path.join(path, im)}')
if new_size:
img = img.resize((224, 224))
return img
else:
return img
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,label in enumerate(['Watermark','No Watermark']):
_=plt.subplot(1,2,i+1)
img=img_read(path,train[train['Label']==label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{'With Watermark' if label=='Watermark' else 'No Watermark'}")
def rgb_dist_plot(img,ax):
start=0
end=256
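    # PIL's histogram() concatenates 256 bins per channel: R=[0:256], G=[256:512], B=[512:768]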
for _,color in enumerate(['Red','Green','Blue']):
_=sns.kdeplot(img.histogram()[start:end],label=color,color=color)
_=plt.legend();
start+=256
end+=256
for label in ['Watermark','No Watermark']:
fig, axs = plt.subplots(1, 2 ,figsize=(15,5))
img_id=train[train['Label']==label].head(1)['Image'].values[0]
img_file =Image.open(f"/kaggle/input/machinehack-watermark-challenge/train/{img_id}")
axs[0].imshow(img_file)
axs[0].axis('off')
axs[0].set_title(img_id,fontsize=18)
_=rgb_dist_plot(img_file,ax=axs[1])
axs[1].set_title("RGB Color Distribution For "+img_id,fontsize=18)
def basic_image_info(df, path):
image_name = []
img_mode = []
img_height = []
img_width = []
img_contrast = []
for file in tqdm_notebook(df['Image']):
image_name.append(file)
img = Image.open(f'{os.path.join(path, file)}')
grey_img = cv2.imread(f'{os.path.join(path, file)}', cv2.COLOR_BGR2GRAY)
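        # note: cv2.COLOR_BGR2GRAY is a cvtColor code, not an imread flag; cv2.IMREAD_GRAYSCALE is probably what was intended here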
img_mode.append(img.mode)
img_width.append(img.width)
img_height.append(img.height)
img_contrast.append(grey_img.std())
return pd.DataFrame({'image_name': image_name, 'img_mode': img_mode, 'img_contrast': img_contrast, 'img_width': img_width, 'img_height': img_height})
train_image_basic_info=basic_image_info(train,
"/kaggle/input/machinehack-watermark-challenge/train")
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,img in enumerate(train_image_basic_info[train_image_basic_info['img_contrast']<15]['image_name'].values):
_=plt.subplot(5,2,i+1)
img_file=img_read(path,img)
plt.imshow(img_file)
plt.axis('off')
plt.title(f"{'With Watermark' if train[train['Image']==img]['Label'].values=='Watermark' else 'No Watermark'}")
for img in train_image_basic_info[train_image_basic_info['img_contrast']<15]['image_name'][:10].values:
fig, axs = plt.subplots(1, 2 ,figsize=(15,5))
img_file =Image.open(f"/kaggle/input/machinehack-watermark-challenge/train/{img}")
axs[0].imshow(img_file)
axs[0].axis('off')
    axs[0].set_title(img,fontsize=18)
    _=rgb_dist_plot(img_file,ax=axs[1])
    axs[1].set_title("RGB Color Distribution For "+img,fontsize=18)
path = '/kaggle/input/machinehack-watermark-challenge/train'
fig = plt.figure(figsize=(8, 15))
for i, img in enumerate(train_image_basic_info[train_image_basic_info['img_contrast'] > 98]['image_name'].values):
_ = plt.subplot(8, 2, i + 1)
img_file = img_read(path, img)
plt.imshow(img_file)
plt.axis('off')
plt.title(f"{('With Watermark' if train[train['Image'] == img]['Label'].values == 'Watermark' else 'No Watermark')}") | code |
130023373/cell_23 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
import os
import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
img = Image.open(f'{os.path.join(path, im)}')
if new_size:
img = img.resize((224, 224))
return img
else:
return img
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,label in enumerate(['Watermark','No Watermark']):
_=plt.subplot(1,2,i+1)
img=img_read(path,train[train['Label']==label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{'With Watermark' if label=='Watermark' else 'No Watermark'}")
def rgb_dist_plot(img,ax):
start=0
end=256
for _,color in enumerate(['Red','Green','Blue']):
_=sns.kdeplot(img.histogram()[start:end],label=color,color=color)
_=plt.legend();
start+=256
end+=256
for label in ['Watermark', 'No Watermark']:
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
img_id = train[train['Label'] == label].head(1)['Image'].values[0]
img_file = Image.open(f'/kaggle/input/machinehack-watermark-challenge/train/{img_id}')
axs[0].imshow(img_file)
axs[0].axis('off')
axs[0].set_title(img_id, fontsize=18)
_ = rgb_dist_plot(img_file, ax=axs[1])
axs[1].set_title('RGB Color Distribution For ' + img_id, fontsize=18) | code |
130023373/cell_20 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
import os
import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
img = Image.open(f'{os.path.join(path, im)}')
if new_size:
img = img.resize((224, 224))
return img
else:
return img
path = '/kaggle/input/machinehack-watermark-challenge/train'
fig = plt.figure(figsize=(8, 15))
for i, label in enumerate(['Watermark', 'No Watermark']):
_ = plt.subplot(1, 2, i + 1)
img = img_read(path, train[train['Label'] == label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{('With Watermark' if label == 'Watermark' else 'No Watermark')}") | code |
130023373/cell_26 | [
"image_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
from tqdm import tqdm_notebook
import cv2 #for read the image
import os
import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
img = Image.open(f'{os.path.join(path, im)}')
if new_size:
img = img.resize((224, 224))
return img
else:
return img
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,label in enumerate(['Watermark','No Watermark']):
_=plt.subplot(1,2,i+1)
img=img_read(path,train[train['Label']==label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{'With Watermark' if label=='Watermark' else 'No Watermark'}")
def rgb_dist_plot(img,ax):
start=0
end=256
for _,color in enumerate(['Red','Green','Blue']):
_=sns.kdeplot(img.histogram()[start:end],label=color,color=color)
_=plt.legend();
start+=256
end+=256
for label in ['Watermark','No Watermark']:
fig, axs = plt.subplots(1, 2 ,figsize=(15,5))
img_id=train[train['Label']==label].head(1)['Image'].values[0]
img_file =Image.open(f"/kaggle/input/machinehack-watermark-challenge/train/{img_id}")
axs[0].imshow(img_file)
axs[0].axis('off')
axs[0].set_title(img_id,fontsize=18)
_=rgb_dist_plot(img_file,ax=axs[1])
axs[1].set_title("RGB Color Distribution For "+img_id,fontsize=18)
def basic_image_info(df, path):
image_name = []
img_mode = []
img_height = []
img_width = []
img_contrast = []
for file in tqdm_notebook(df['Image']):
image_name.append(file)
img = Image.open(f'{os.path.join(path, file)}')
grey_img = cv2.imread(f'{os.path.join(path, file)}', cv2.COLOR_BGR2GRAY)
img_mode.append(img.mode)
img_width.append(img.width)
img_height.append(img.height)
img_contrast.append(grey_img.std())
return pd.DataFrame({'image_name': image_name, 'img_mode': img_mode, 'img_contrast': img_contrast, 'img_width': img_width, 'img_height': img_height})
train_image_basic_info = basic_image_info(train, '/kaggle/input/machinehack-watermark-challenge/train') | code |
130023373/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd #dataframe manipulation
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
test.info() | code |
130023373/cell_32 | [
"text_html_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
from tqdm import tqdm_notebook
import cv2 #for read the image
import os
import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
img = Image.open(f'{os.path.join(path, im)}')
if new_size:
img = img.resize((224, 224))
return img
else:
return img
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,label in enumerate(['Watermark','No Watermark']):
_=plt.subplot(1,2,i+1)
img=img_read(path,train[train['Label']==label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{'With Watermark' if label=='Watermark' else 'No Watermark'}")
def rgb_dist_plot(img,ax):
start=0
end=256
for _,color in enumerate(['Red','Green','Blue']):
_=sns.kdeplot(img.histogram()[start:end],label=color,color=color)
_=plt.legend();
start+=256
end+=256
for label in ['Watermark','No Watermark']:
fig, axs = plt.subplots(1, 2 ,figsize=(15,5))
img_id=train[train['Label']==label].head(1)['Image'].values[0]
img_file =Image.open(f"/kaggle/input/machinehack-watermark-challenge/train/{img_id}")
axs[0].imshow(img_file)
axs[0].axis('off')
axs[0].set_title(img_id,fontsize=18)
_=rgb_dist_plot(img_file,ax=axs[1])
axs[1].set_title("RGB Color Distribution For "+img_id,fontsize=18)
def basic_image_info(df, path):
image_name = []
img_mode = []
img_height = []
img_width = []
img_contrast = []
for file in tqdm_notebook(df['Image']):
image_name.append(file)
img = Image.open(f'{os.path.join(path, file)}')
grey_img = cv2.imread(f'{os.path.join(path, file)}', cv2.COLOR_BGR2GRAY)
img_mode.append(img.mode)
img_width.append(img.width)
img_height.append(img.height)
img_contrast.append(grey_img.std())
return pd.DataFrame({'image_name': image_name, 'img_mode': img_mode, 'img_contrast': img_contrast, 'img_width': img_width, 'img_height': img_height})
train_image_basic_info=basic_image_info(train,
"/kaggle/input/machinehack-watermark-challenge/train")
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,img in enumerate(train_image_basic_info[train_image_basic_info['img_contrast']<15]['image_name'].values):
_=plt.subplot(5,2,i+1)
img_file=img_read(path,img)
plt.imshow(img_file)
plt.axis('off')
plt.title(f"{'With Watermark' if train[train['Image']==img]['Label'].values=='Watermark' else 'No Watermark'}")
for img in train_image_basic_info[train_image_basic_info['img_contrast'] < 15]['image_name'][:10].values:
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
img_file = Image.open(f'/kaggle/input/machinehack-watermark-challenge/train/{img}')
axs[0].imshow(img_file)
axs[0].axis('off')
    axs[0].set_title(img, fontsize=18)
    _ = rgb_dist_plot(img_file, ax=axs[1])
    axs[1].set_title('RGB Color Distribution For ' + img, fontsize=18)
130023373/cell_28 | [
"image_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
from tqdm import tqdm_notebook
import cv2 #for read the image
import os
import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
img = Image.open(f'{os.path.join(path, im)}')
if new_size:
img = img.resize((224, 224))
return img
else:
return img
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,label in enumerate(['Watermark','No Watermark']):
_=plt.subplot(1,2,i+1)
img=img_read(path,train[train['Label']==label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{'With Watermark' if label=='Watermark' else 'No Watermark'}")
def rgb_dist_plot(img,ax):
start=0
end=256
for _,color in enumerate(['Red','Green','Blue']):
_=sns.kdeplot(img.histogram()[start:end],label=color,color=color)
_=plt.legend();
start+=256
end+=256
for label in ['Watermark','No Watermark']:
fig, axs = plt.subplots(1, 2 ,figsize=(15,5))
img_id=train[train['Label']==label].head(1)['Image'].values[0]
img_file =Image.open(f"/kaggle/input/machinehack-watermark-challenge/train/{img_id}")
axs[0].imshow(img_file)
axs[0].axis('off')
axs[0].set_title(img_id,fontsize=18)
_=rgb_dist_plot(img_file,ax=axs[1])
axs[1].set_title("RGB Color Distribution For "+img_id,fontsize=18)
def basic_image_info(df, path):
image_name = []
img_mode = []
img_height = []
img_width = []
img_contrast = []
for file in tqdm_notebook(df['Image']):
image_name.append(file)
img = Image.open(f'{os.path.join(path, file)}')
grey_img = cv2.imread(f'{os.path.join(path, file)}', cv2.COLOR_BGR2GRAY)
img_mode.append(img.mode)
img_width.append(img.width)
img_height.append(img.height)
img_contrast.append(grey_img.std())
return pd.DataFrame({'image_name': image_name, 'img_mode': img_mode, 'img_contrast': img_contrast, 'img_width': img_width, 'img_height': img_height})
train_image_basic_info=basic_image_info(train,
"/kaggle/input/machinehack-watermark-challenge/train")
print(f"The image mode is:{train_image_basic_info['img_mode'].unique()}, width of images is:{train_image_basic_info['img_width'].unique()}, and the image height is:{train_image_basic_info['img_height'].unique()} ") | code |
130023373/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd #dataframe manipulation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_ = sns.countplot(x=train['Label'], order=train['Label'].value_counts().index)
_ = plt.title('Target Label Distribution', fontsize=18) | code |