path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
89142701/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.isnull().sum() | code |
89142701/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.head() | code |
89142701/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
train.isnull().sum() | code |
89142701/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
train.isnull().sum()
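# Upper-triangle mask of the numeric correlation matrix (PassengerId dropped as a pure identifier);
# this is the kind of mask usually passed to sns.heatmap, though it is not used again in this cell.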
mask = np.triu(train.drop(['PassengerId'], axis=1).corr())
fig = sns.catplot(x='Pclass',y='Age',data=train,hue='Survived',ci=None)
fig = sns.barplot(x='Pclass',y='Survived',data=train, hue='Sex',ci=None)
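# Annotate every bar with its height, i.e. the mean survival rate of that Pclass/Sex group.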
for container in fig.containers:
fig.bar_label(container,label_type='center',fmt='%1.2f%%')
fig = sns.barplot(x='Pclass', y='Survived', data=train)
fig.bar_label(fig.containers[0],label_type='center',fmt='%1.1f%%')
fig = sns.barplot(x='Survived',y='Sex',data=train)
fig.bar_label(fig.containers[0],size=14,label_type='center',fmt='%1.2f%%')
g = sns.FacetGrid(train, col='Embarked', size=6)
g.map(sns.barplot, 'Pclass', 'Survived', hue=train.Sex)
g.add_legend()
fig = sns.barplot(x='Embarked', y='Survived', data=train)
fig.bar_label(fig.containers[0], size=14, label_type='center', fmt='%1.2f%%') | code |
16154547/cell_13 | [
"text_plain_output_1.png"
import ase as ase
import copy
import dscribe as ds
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
structure = pd.read_csv('../input/structures.csv')
rcut = 10.0
g2_params = [[1, 2], [0.1, 2], [0.01, 2], [1, 6], [0.1, 6], [0.01, 6]]
g4_params = [[1, 4, 1], [0.1, 4, 1], [0.01, 4, 1], [1, 4, -1], [0.1, 4, -1], [0.01, 4, -1]]
g3_params = None
g5_params = None
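# ACSF hyper-parameters: in dscribe, each G2 entry is (eta, R_s) and each G4 entry is
# (eta, zeta, lambda); the G3 and G5 terms are disabled here.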
tmp_structure = structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy()
species = tmp_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
molecule_atoms = tmp_structure.loc[:, 'atom']
molecule_positions = tmp_structure.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
acsf_features = acsf.create(molecule_system, n_jobs=1)
acsf_features[0]
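# The helper below builds human-readable column names for the ACSF output: radial terms (G1/G2/G3)
# per neighbour species sorted by atomic number, then angular terms (G4/G5) per species pair,
# mirroring the order this notebook assumes dscribe uses.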
def create_feature_labels(species, rcut, g2_params=None, g3_params=None, g4_params=None, g5_params=None, transform_to_symbols=True):
def get_atom_id(atom_nr, transform_to_symbols):
if transform_to_symbols == True:
atom_id = nr_to_symbol[atom_nr]
else:
atom_id = atom_nr
return atom_id
feature_label = []
g_params = {'g1': [rcut], 'g2': g2_params, 'g3': g3_params, 'g4': g4_params, 'g5': g5_params}
tmp_system = ase.Atoms(species, [[0, 0, 0]] * len(species))
nr_to_symbol = {number: symbol for symbol, number in zip(tmp_system.get_chemical_symbols(), tmp_system.get_atomic_numbers())}
atomic_numbers = sorted(tmp_system.get_atomic_numbers())
for atom_nr in atomic_numbers:
atom_id = get_atom_id(atom_nr, transform_to_symbols)
for g in ['g1', 'g2', 'g3']:
params = g_params[g]
if params is not None:
for para in params:
feature_label.append(f'feat_acsf_{g}_{atom_id}_{para}')
for atom_nr in atomic_numbers:
atom_id = get_atom_id(atom_nr, transform_to_symbols)
for i in range(0, atom_nr + 1):
if i in atomic_numbers:
atom_id_2 = get_atom_id(i, transform_to_symbols)
for g in ['g4', 'g5']:
params = g_params[g]
if params is not None:
for para in params:
feature_label.append(f'feat_acsf_{g}_{atom_id}_{atom_id_2}_{para}')
return feature_label
def calculate_symmetric_functions(df_structure, rcut, g2_params=None, g3_params=None, g4_params=None, g5_params=None):
species = df_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
structure_molecules = df_structure.molecule_name.unique()
acsf_feature_labels = create_feature_labels(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
df_structure = df_structure.reindex(columns=df_structure.columns.tolist() + acsf_feature_labels)
df_structure = df_structure.sort_values(['molecule_name', 'atom_index'])
acsf_structure_chunks = calculate_acsf_in_chunks(structure_molecules, df_structure, acsf, acsf_feature_labels)
acsf_structure = pd.concat(acsf_structure_chunks)
return acsf_structure
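# The helpers below process the molecule list in slices of `step_size`, compute the ACSF matrix per
# molecule via dscribe, and write the values back into the per-chunk copies of the structure frame.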
def calculate_acsf_in_chunks(structure_molecules, df_structure, acsf, acsf_feature_labels, step_size=2000):
mol_counter = 0
max_counter = len(structure_molecules)
all_chunks = []
tic = time.time()
while mol_counter * step_size < max_counter:
tmp_molecules = structure_molecules[mol_counter * step_size:(mol_counter + 1) * step_size]
tmp_structure = df_structure.loc[df_structure.molecule_name.isin(tmp_molecules), :].copy()
tmp_results = calculate_acsf_multiple_molecules(tmp_molecules, tmp_structure, acsf, acsf_feature_labels)
all_chunks.append(tmp_results.copy())
mol_counter += 1
return all_chunks
def calculate_acsf_multiple_molecules(molecule_names, df_structure, acsf, acsf_feature_labels):
counter = 0
tic = time.time()
for molecule_name in molecule_names:
df_molecule = df_structure.loc[df_structure.molecule_name == molecule_name, :]
acsf_values = calculate_acsf_single_molecule(df_molecule, acsf)
df_structure.loc[df_structure.molecule_name == molecule_name, acsf_feature_labels] = copy.copy(acsf_values)
counter += 1
return df_structure
def calculate_acsf_single_molecule(df_molecule, acsf):
molecule_atoms = df_molecule.loc[:, 'atom']
molecule_positions = df_molecule.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
return acsf.create(molecule_system, n_jobs=1)
acsf_structure = calculate_symmetric_functions(structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy(), rcut, g2_params=g2_params, g4_params=g4_params)
acsf_structure.head() | code |
16154547/cell_9 | [
"text_plain_output_1.png"
] | import ase as ase
import dscribe as ds
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
structure = pd.read_csv('../input/structures.csv')
rcut = 10.0
g2_params = [[1, 2], [0.1, 2], [0.01, 2], [1, 6], [0.1, 6], [0.01, 6]]
g4_params = [[1, 4, 1], [0.1, 4, 1], [0.01, 4, 1], [1, 4, -1], [0.1, 4, -1], [0.01, 4, -1]]
g3_params = None
g5_params = None
tmp_structure = structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy()
species = tmp_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
molecule_atoms = tmp_structure.loc[:, 'atom']
molecule_positions = tmp_structure.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
acsf_features = acsf.create(molecule_system, n_jobs=1)
acsf_features[0] | code |
16154547/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16154547/cell_18 | [
"text_plain_output_1.png"
import ase as ase
import copy
import dscribe as ds
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
structure = pd.read_csv('../input/structures.csv')
rcut = 10.0
g2_params = [[1, 2], [0.1, 2], [0.01, 2], [1, 6], [0.1, 6], [0.01, 6]]
g4_params = [[1, 4, 1], [0.1, 4, 1], [0.01, 4, 1], [1, 4, -1], [0.1, 4, -1], [0.01, 4, -1]]
g3_params = None
g5_params = None
tmp_structure = structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy()
species = tmp_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
molecule_atoms = tmp_structure.loc[:, 'atom']
molecule_positions = tmp_structure.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
acsf_features = acsf.create(molecule_system, n_jobs=1)
acsf_features[0]
def create_feature_labels(species, rcut, g2_params=None, g3_params=None, g4_params=None, g5_params=None, transform_to_symbols=True):
def get_atom_id(atom_nr, transform_to_symbols):
if transform_to_symbols == True:
atom_id = nr_to_symbol[atom_nr]
else:
atom_id = atom_nr
return atom_id
feature_label = []
g_params = {'g1': [rcut], 'g2': g2_params, 'g3': g3_params, 'g4': g4_params, 'g5': g5_params}
tmp_system = ase.Atoms(species, [[0, 0, 0]] * len(species))
nr_to_symbol = {number: symbol for symbol, number in zip(tmp_system.get_chemical_symbols(), tmp_system.get_atomic_numbers())}
atomic_numbers = sorted(tmp_system.get_atomic_numbers())
for atom_nr in atomic_numbers:
atom_id = get_atom_id(atom_nr, transform_to_symbols)
for g in ['g1', 'g2', 'g3']:
params = g_params[g]
if params is not None:
for para in params:
feature_label.append(f'feat_acsf_{g}_{atom_id}_{para}')
for atom_nr in atomic_numbers:
atom_id = get_atom_id(atom_nr, transform_to_symbols)
for i in range(0, atom_nr + 1):
if i in atomic_numbers:
atom_id_2 = get_atom_id(i, transform_to_symbols)
for g in ['g4', 'g5']:
params = g_params[g]
if params is not None:
for para in params:
feature_label.append(f'feat_acsf_{g}_{atom_id}_{atom_id_2}_{para}')
return feature_label
def calculate_symmetric_functions(df_structure, rcut, g2_params=None, g3_params=None, g4_params=None, g5_params=None):
species = df_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
structure_molecules = df_structure.molecule_name.unique()
acsf_feature_labels = create_feature_labels(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
df_structure = df_structure.reindex(columns=df_structure.columns.tolist() + acsf_feature_labels)
df_structure = df_structure.sort_values(['molecule_name', 'atom_index'])
acsf_structure_chunks = calculate_acsf_in_chunks(structure_molecules, df_structure, acsf, acsf_feature_labels)
acsf_structure = pd.concat(acsf_structure_chunks)
return acsf_structure
def calculate_acsf_in_chunks(structure_molecules, df_structure, acsf, acsf_feature_labels, step_size=2000):
mol_counter = 0
max_counter = len(structure_molecules)
all_chunks = []
tic = time.time()
while mol_counter * step_size < max_counter:
tmp_molecules = structure_molecules[mol_counter * step_size:(mol_counter + 1) * step_size]
tmp_structure = df_structure.loc[df_structure.molecule_name.isin(tmp_molecules), :].copy()
tmp_results = calculate_acsf_multiple_molecules(tmp_molecules, tmp_structure, acsf, acsf_feature_labels)
all_chunks.append(tmp_results.copy())
mol_counter += 1
return all_chunks
def calculate_acsf_multiple_molecules(molecule_names, df_structure, acsf, acsf_feature_labels):
counter = 0
tic = time.time()
for molecule_name in molecule_names:
df_molecule = df_structure.loc[df_structure.molecule_name == molecule_name, :]
acsf_values = calculate_acsf_single_molecule(df_molecule, acsf)
df_structure.loc[df_structure.molecule_name == molecule_name, acsf_feature_labels] = copy.copy(acsf_values)
counter += 1
return df_structure
def calculate_acsf_single_molecule(df_molecule, acsf):
molecule_atoms = df_molecule.loc[:, 'atom']
molecule_positions = df_molecule.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
return acsf.create(molecule_system, n_jobs=1)
acsf_structure = calculate_symmetric_functions(structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy(), rcut, g2_params=g2_params, g4_params=g4_params)
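# Sanity check: recompute the carbon atom's H-neighbour G1 and G2 values by hand using the
# standard radial symmetry functions, with cutoff fc(r) = 0.5 * (cos(pi * r / rcut) + 1):
#   G1 = sum_j fc(r_ij)
#   G2 = sum_j exp(-eta * (r_ij - Rs)**2) * fc(r_ij)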
def dist(coord_0, coord_1):
return np.sqrt(np.sum((coord_0 - coord_1) ** 2))
def fc(dist, rcut):
return 0.5 * (np.cos(np.pi * dist / rcut) + 1)
test_molecule = structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :]
coord_c = test_molecule.loc[test_molecule.atom == 'C', ['x', 'y', 'z']].values[0]
g1_H = 0
for coord_h in test_molecule.loc[test_molecule.atom == 'H', ['x', 'y', 'z']].values:
dist_h_c = dist(coord_c, coord_h)
if dist_h_c <= rcut:
g1_H += fc(dist_h_c, rcut)
print(f'g1 value is {g1_H}, using rcut: {rcut}')
for para in g2_params:
eta = para[0]
rs = para[1]
g2_H = 0
for coord_h in test_molecule.loc[test_molecule.atom == 'H', ['x', 'y', 'z']].values:
dist_h_c = dist(coord_c, coord_h)
g2_H += np.exp(-eta * (dist_h_c - rs) ** 2) * fc(dist_h_c, rcut)
print(f'g2 value is {g2_H}, using eta: {eta}, rs: {rs}') | code |
16154547/cell_8 | [
"text_plain_output_1.png"
] | import ase as ase
import dscribe as ds
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
structure = pd.read_csv('../input/structures.csv')
rcut = 10.0
g2_params = [[1, 2], [0.1, 2], [0.01, 2], [1, 6], [0.1, 6], [0.01, 6]]
g4_params = [[1, 4, 1], [0.1, 4, 1], [0.01, 4, 1], [1, 4, -1], [0.1, 4, -1], [0.01, 4, -1]]
g3_params = None
g5_params = None
tmp_structure = structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy()
species = tmp_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
molecule_atoms = tmp_structure.loc[:, 'atom']
molecule_positions = tmp_structure.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
print(molecule_system)
print(molecule_system.get_atomic_numbers())
print(molecule_system.get_positions()) | code |
16154547/cell_22 | [
"text_html_output_1.png",
"text_plain_output_1.png"
import ase as ase
import copy
import dscribe as ds
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
structure = pd.read_csv('../input/structures.csv')
rcut = 10.0
g2_params = [[1, 2], [0.1, 2], [0.01, 2], [1, 6], [0.1, 6], [0.01, 6]]
g4_params = [[1, 4, 1], [0.1, 4, 1], [0.01, 4, 1], [1, 4, -1], [0.1, 4, -1], [0.01, 4, -1]]
g3_params = None
g5_params = None
tmp_structure = structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy()
species = tmp_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
molecule_atoms = tmp_structure.loc[:, 'atom']
molecule_positions = tmp_structure.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
acsf_features = acsf.create(molecule_system, n_jobs=1)
acsf_features[0]
def create_feature_labels(species, rcut, g2_params=None, g3_params=None, g4_params=None, g5_params=None, transform_to_symbols=True):
def get_atom_id(atom_nr, transform_to_symbols):
if transform_to_symbols == True:
atom_id = nr_to_symbol[atom_nr]
else:
atom_id = atom_nr
return atom_id
feature_label = []
g_params = {'g1': [rcut], 'g2': g2_params, 'g3': g3_params, 'g4': g4_params, 'g5': g5_params}
tmp_system = ase.Atoms(species, [[0, 0, 0]] * len(species))
nr_to_symbol = {number: symbol for symbol, number in zip(tmp_system.get_chemical_symbols(), tmp_system.get_atomic_numbers())}
atomic_numbers = sorted(tmp_system.get_atomic_numbers())
for atom_nr in atomic_numbers:
atom_id = get_atom_id(atom_nr, transform_to_symbols)
for g in ['g1', 'g2', 'g3']:
params = g_params[g]
if params is not None:
for para in params:
feature_label.append(f'feat_acsf_{g}_{atom_id}_{para}')
for atom_nr in atomic_numbers:
atom_id = get_atom_id(atom_nr, transform_to_symbols)
for i in range(0, atom_nr + 1):
if i in atomic_numbers:
atom_id_2 = get_atom_id(i, transform_to_symbols)
for g in ['g4', 'g5']:
params = g_params[g]
if params is not None:
for para in params:
feature_label.append(f'feat_acsf_{g}_{atom_id}_{atom_id_2}_{para}')
return feature_label
def calculate_symmetric_functions(df_structure, rcut, g2_params=None, g3_params=None, g4_params=None, g5_params=None):
species = df_structure.atom.unique()
acsf = ds.descriptors.ACSF(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
structure_molecules = df_structure.molecule_name.unique()
acsf_feature_labels = create_feature_labels(species=species, rcut=rcut, g2_params=g2_params, g3_params=g3_params, g4_params=g4_params, g5_params=g5_params)
df_structure = df_structure.reindex(columns=df_structure.columns.tolist() + acsf_feature_labels)
df_structure = df_structure.sort_values(['molecule_name', 'atom_index'])
acsf_structure_chunks = calculate_acsf_in_chunks(structure_molecules, df_structure, acsf, acsf_feature_labels)
acsf_structure = pd.concat(acsf_structure_chunks)
return acsf_structure
def calculate_acsf_in_chunks(structure_molecules, df_structure, acsf, acsf_feature_labels, step_size=2000):
mol_counter = 0
max_counter = len(structure_molecules)
all_chunks = []
tic = time.time()
while mol_counter * step_size < max_counter:
tmp_molecules = structure_molecules[mol_counter * step_size:(mol_counter + 1) * step_size]
tmp_structure = df_structure.loc[df_structure.molecule_name.isin(tmp_molecules), :].copy()
tmp_results = calculate_acsf_multiple_molecules(tmp_molecules, tmp_structure, acsf, acsf_feature_labels)
all_chunks.append(tmp_results.copy())
mol_counter += 1
return all_chunks
def calculate_acsf_multiple_molecules(molecule_names, df_structure, acsf, acsf_feature_labels):
counter = 0
tic = time.time()
for molecule_name in molecule_names:
df_molecule = df_structure.loc[df_structure.molecule_name == molecule_name, :]
acsf_values = calculate_acsf_single_molecule(df_molecule, acsf)
df_structure.loc[df_structure.molecule_name == molecule_name, acsf_feature_labels] = copy.copy(acsf_values)
counter += 1
return df_structure
def calculate_acsf_single_molecule(df_molecule, acsf):
molecule_atoms = df_molecule.loc[:, 'atom']
molecule_positions = df_molecule.loc[:, ['x', 'y', 'z']]
molecule_system = ase.atoms.Atoms(symbols=molecule_atoms, positions=molecule_positions)
return acsf.create(molecule_system, n_jobs=1)
acsf_structure = calculate_symmetric_functions(structure.loc[structure.molecule_name == 'dsgdb9nsd_000001', :].copy(), rcut, g2_params=g2_params, g4_params=g4_params)
feature_columns = [col for col in acsf_structure.columns if col.startswith('feat_acsf')]
len_features = len(feature_columns)
print(f'We have {len_features} features')
print(f'Boris announced ~ 250') | code |
1009798/cell_4 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
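# Image ids are recovered by stripping the '<folder>/' prefix and the trailing '.jpg' from each path.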
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
TEST_DATA = '../input/test'
test_files = glob(os.path.join(TEST_DATA, '*.jpg'))
test_ids = np.array([s[len(TEST_DATA) + 1:-4] for s in test_files])
print(len(test_ids))
print(test_ids[:10]) | code |
1009798/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1009798/cell_11 | [
"text_plain_output_1.png"
] | from glob import glob
import cv2
import matplotlib.pylab as plt
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
TEST_DATA = '../input/test'
test_files = glob(os.path.join(TEST_DATA, '*.jpg'))
test_ids = np.array([s[len(TEST_DATA) + 1:-4] for s in test_files])
ADDITIONAL_DATA = '../input/additional'
additional_type_1_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_1', '*.jpg'))
additional_type_1_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_1')) + 1:-4] for s in additional_type_1_files])
additional_type_2_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_2', '*.jpg'))
additional_type_2_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_2')) + 1:-4] for s in additional_type_2_files])
additional_type_3_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_3', '*.jpg'))
additional_type_3_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_3')) + 1:-4] for s in additional_type_3_files])
def get_filename(image_id, image_type):
"""
Method to get image file path from its id and type
"""
if image_type == 'Type_1' or image_type == 'Type_2' or image_type == 'Type_3':
data_path = os.path.join(TRAIN_DATA, image_type)
elif image_type == 'Test':
data_path = TEST_DATA
elif image_type == 'AType_1' or image_type == 'AType_2' or image_type == 'AType_3':
data_path = os.path.join(ADDITIONAL_DATA, image_type)
else:
raise Exception("Image type '%s' is not recognized" % image_type)
ext = 'jpg'
return os.path.join(data_path, '{}.{}'.format(image_id, ext))
import cv2
def get_image_data(image_id, image_type):
"""
Method to get image data as np.array specifying image id and type
"""
fname = get_filename(image_id, image_type)
img = cv2.imread(fname)
assert img is not None, 'Failed to read image : %s, %s' % (image_id, image_type)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
import matplotlib.pylab as plt
def plt_st(l1, l2):
pass
tile_size = (256, 256)
n = 15
complete_images = []
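# For each cervix type, tile the resized, id-labelled training images into one large mosaic,
# n images per row with a 2-pixel gutter between tiles.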
for k, type_ids in enumerate([type_1_ids, type_2_ids, type_3_ids]):
m = int(np.floor(len(type_ids) / n))
complete_image = np.zeros((m * (tile_size[0] + 2), n * (tile_size[1] + 2), 3), dtype=np.uint8)
train_ids = sorted(type_ids)
counter = 0
for i in range(m):
ys = i * (tile_size[1] + 2)
ye = ys + tile_size[1]
for j in range(n):
xs = j * (tile_size[0] + 2)
xe = xs + tile_size[0]
image_id = train_ids[counter]
counter += 1
img = get_image_data(image_id, 'Type_%i' % (k + 1))
img = cv2.resize(img, dsize=tile_size)
img = cv2.putText(img, image_id, (5, img.shape[0] - 5), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), thickness=3)
complete_image[ys:ye, xs:xe] = img[:, :, :]
complete_images.append(complete_image)
plt_st(20, 20)
index = 1
m = complete_images[index].shape[0] / (tile_size[0] + 2)
n = int(np.ceil(m / 20))
for i in range(n):
plt_st(20, 20)
ys = i * (tile_size[0] + 2) * 20
ye = min((i + 1) * (tile_size[0] + 2) * 20, complete_images[index].shape[0])
plt.imshow(complete_images[index][ys:ye, :, :])
plt.title('Training dataset of type %i, part %i' % (index + 1, i)) | code |
1009798/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from glob import glob
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
print(len(type_1_files), len(type_2_files), len(type_3_files))
print('Type 1', type_1_ids[:10])
print('Type 2', type_2_ids[:10])
print('Type 3', type_3_ids[:10]) | code |
1009798/cell_10 | [
"text_plain_output_1.png"
] | from glob import glob
import cv2
import matplotlib.pylab as plt
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
TEST_DATA = '../input/test'
test_files = glob(os.path.join(TEST_DATA, '*.jpg'))
test_ids = np.array([s[len(TEST_DATA) + 1:-4] for s in test_files])
ADDITIONAL_DATA = '../input/additional'
additional_type_1_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_1', '*.jpg'))
additional_type_1_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_1')) + 1:-4] for s in additional_type_1_files])
additional_type_2_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_2', '*.jpg'))
additional_type_2_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_2')) + 1:-4] for s in additional_type_2_files])
additional_type_3_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_3', '*.jpg'))
additional_type_3_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_3')) + 1:-4] for s in additional_type_3_files])
def get_filename(image_id, image_type):
"""
Method to get image file path from its id and type
"""
if image_type == 'Type_1' or image_type == 'Type_2' or image_type == 'Type_3':
data_path = os.path.join(TRAIN_DATA, image_type)
elif image_type == 'Test':
data_path = TEST_DATA
elif image_type == 'AType_1' or image_type == 'AType_2' or image_type == 'AType_3':
data_path = os.path.join(ADDITIONAL_DATA, image_type)
else:
raise Exception("Image type '%s' is not recognized" % image_type)
ext = 'jpg'
return os.path.join(data_path, '{}.{}'.format(image_id, ext))
import cv2
def get_image_data(image_id, image_type):
"""
Method to get image data as np.array specifying image id and type
"""
fname = get_filename(image_id, image_type)
img = cv2.imread(fname)
assert img is not None, 'Failed to read image : %s, %s' % (image_id, image_type)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
import matplotlib.pylab as plt
def plt_st(l1, l2):
pass
tile_size = (256, 256)
n = 15
complete_images = []
for k, type_ids in enumerate([type_1_ids, type_2_ids, type_3_ids]):
m = int(np.floor(len(type_ids) / n))
complete_image = np.zeros((m * (tile_size[0] + 2), n * (tile_size[1] + 2), 3), dtype=np.uint8)
train_ids = sorted(type_ids)
counter = 0
for i in range(m):
ys = i * (tile_size[1] + 2)
ye = ys + tile_size[1]
for j in range(n):
xs = j * (tile_size[0] + 2)
xe = xs + tile_size[0]
image_id = train_ids[counter]
counter += 1
img = get_image_data(image_id, 'Type_%i' % (k + 1))
img = cv2.resize(img, dsize=tile_size)
img = cv2.putText(img, image_id, (5, img.shape[0] - 5), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), thickness=3)
complete_image[ys:ye, xs:xe] = img[:, :, :]
complete_images.append(complete_image)
plt_st(20, 20)
plt.imshow(complete_images[0])
plt.title('Training dataset of type %i' % 0) | code |
1009798/cell_5 | [
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
TEST_DATA = '../input/test'
test_files = glob(os.path.join(TEST_DATA, '*.jpg'))
test_ids = np.array([s[len(TEST_DATA) + 1:-4] for s in test_files])
ADDITIONAL_DATA = '../input/additional'
additional_type_1_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_1', '*.jpg'))
additional_type_1_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_1')) + 1:-4] for s in additional_type_1_files])
additional_type_2_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_2', '*.jpg'))
additional_type_2_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_2')) + 1:-4] for s in additional_type_2_files])
additional_type_3_files = glob(os.path.join(ADDITIONAL_DATA, 'Type_3', '*.jpg'))
additional_type_3_ids = np.array([s[len(os.path.join(ADDITIONAL_DATA, 'Type_3')) + 1:-4] for s in additional_type_3_files])
print(len(additional_type_1_files), len(additional_type_2_files), len(additional_type_3_files))
print('Type 1', additional_type_1_ids[:10])
print('Type 2', additional_type_2_ids[:10])
print('Type 3', additional_type_3_ids[:10]) | code |
34144954/cell_9 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, ReduceLROnPlateau
from tensorflow.keras.layers import BatchNormalization,Activation,Dropout,Dense
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D
import cv2
import glob
import keras
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import os
submission_sumple = pd.read_csv('/kaggle/input/aiacademydeeplearning/sample_submission.csv')
train = pd.read_csv('/kaggle/input/aiacademydeeplearning/train.csv')
num_cols = ['bedrooms', 'bathrooms', 'area', 'zipcode']
target = ['price']
train[num_cols] = train[num_cols].fillna(-99999)
Scaler = StandardScaler()
train[num_cols] = Scaler.fit_transform(train[num_cols])
test = pd.read_csv('/kaggle/input/aiacademydeeplearning/test.csv')
test[num_cols] = test[num_cols].fillna(-99999)
Scaler = StandardScaler()
test[num_cols] = Scaler.fit_transform(test[num_cols])
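# For every house id, load the kitchen / bathroom / bedroom / frontal photos, resize each to
# (size, size), stack them into a 2x2 mosaic and scale pixel values to [0, 1].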
def load_images(df, inputPath, size, roomType1, roomType2, roomType3, roomType4):
images = []
for i in df['id']:
basePath1 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType1)])
basePath2 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType2)])
basePath3 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType3)])
basePath4 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType4)])
housePaths1 = sorted(list(glob.glob(basePath1)))
housePaths2 = sorted(list(glob.glob(basePath2)))
housePaths3 = sorted(list(glob.glob(basePath3)))
housePaths4 = sorted(list(glob.glob(basePath4)))
for housePath1 in housePaths1:
image1 = cv2.imread(housePath1)
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
image1 = cv2.resize(image1, (size, size))
for housePath2 in housePaths2:
image2 = cv2.imread(housePath2)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
image2 = cv2.resize(image2, (size, size))
for housePath3 in housePaths3:
image3 = cv2.imread(housePath3)
image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2RGB)
image3 = cv2.resize(image3, (size, size))
for housePath4 in housePaths4:
image4 = cv2.imread(housePath4)
image4 = cv2.cvtColor(image4, cv2.COLOR_BGR2RGB)
image4 = cv2.resize(image4, (size, size))
image1_2 = cv2.vconcat([image1, image2])
image3_4 = cv2.vconcat([image3, image4])
image_all = cv2.hconcat([image1_2, image3_4])  # 2x2 mosaic of the four room photos
images.append(image_all)
return np.array(images) / 255.0
inputPath = '/kaggle/input/aiacademydeeplearning/train_images/'
size = 28
roomType1 = 'kitchen'
roomType2 = 'bathroom'
roomType3 = 'bedroom'
roomType4 = 'frontal'
train_images = load_images(train, inputPath, size, roomType1, roomType2, roomType3, roomType4)
inputPath_test = '/kaggle/input/aiacademydeeplearning/test_images/'
test_images = load_images(test, inputPath_test, size, roomType1, roomType2, roomType3, roomType4)
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = (np.array(y_true), np.array(y_pred))
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
if int(tf.__version__.split('.')[0]) >= 2:
from tensorflow import keras
else:
import keras
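# Two-branch network: a small CNN over the 2x2 room-photo mosaic and an MLP over the four tabular
# features, concatenated and regressed onto price with a MAPE loss.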
inputs = keras.layers.Input(shape=(size * 2, size * 2, 3))
lay1 = keras.layers.Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal')(inputs)
lay2 = MaxPooling2D(pool_size=(2, 2))(lay1)
lay3 = BatchNormalization()(lay2)
lay4 = Dropout(0.2)(lay3)
lay5 = Conv2D(filters=64, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal')(lay4)
lay6 = MaxPooling2D(pool_size=(2, 2))(lay5)
lay7 = BatchNormalization()(lay6)
lay8 = Dropout(0.2)(lay7)
lay9 = Conv2D(filters=128, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal')(lay8)
lay10 = MaxPooling2D(pool_size=(2, 2))(lay9)
lay11 = BatchNormalization()(lay10)
lay12 = Dropout(0.2)(lay11)
lay13 = Flatten()(lay12)
lay14 = Dense(units=256, activation='relu', kernel_initializer='he_normal')(lay13)
inputs_mlp = keras.layers.Input(shape=(4,))
lay1_mlp = Dense(units=512, input_shape=(len(num_cols),), kernel_initializer='he_normal', activation='relu')(inputs_mlp)
lay2_mlp = Dropout(0.2)(lay1_mlp)
lay3_mlp = Dense(units=256, kernel_initializer='he_normal', activation='relu')(lay2_mlp)
lay4_mlp = Dropout(0.2)(lay3_mlp)
merged = keras.layers.concatenate([lay14, lay4_mlp])
lay15 = Dense(units=32, activation='relu', kernel_initializer='he_normal')(merged)
lay16 = Dense(units=1, activation='linear')(lay15)
model = keras.Model(inputs=[inputs, inputs_mlp], outputs=lay16)
model.compile(loss='mape', optimizer='adam', metrics=['mape'])
filepath = 'cnn_best_model.hdf5'
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=filepath, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
n = 5
y_pred = np.zeros(len(test))
mape_scores = []
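# Note: the same compiled model instance is reused across the five random splits, so weights carry
# over between iterations; test-set predictions are accumulated and averaged at the end.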
for i in range(n):
train_x, valid_x, train_images_x, valid_images_x = train_test_split(train, train_images, test_size=0.2, random_state=i * 10)
train_y = train_x['price'].values
valid_y = valid_x['price'].values
train_table, valid_table = train_test_split(train, test_size=0.2, random_state=i * 10)
train_f, train_t = (train_table[num_cols].values, train_table[target].values)
valid_f, valid_t = (valid_table[num_cols].values, valid_table[target].values)
model.fit([train_images_x, train_f], train_y, validation_data=([valid_images_x, valid_f], valid_y), epochs=50, batch_size=16, callbacks=[es, checkpoint, reduce_lr_loss])
model.load_weights(filepath)
valid_pred = model.predict([valid_images_x, valid_f], batch_size=32).reshape((-1, 1))
mape_score = mean_absolute_percentage_error(valid_y, valid_pred)
mape_scores.append(mape_score)
test_pred = model.predict([test_images, test[num_cols].values], batch_size=32).reshape((-1, 1))
y_pred += test_pred.reshape([len(test)])
ykai = y_pred / n
print(mape_scores) | code |
34144954/cell_6 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import cv2
import glob
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
submission_sumple = pd.read_csv('/kaggle/input/aiacademydeeplearning/sample_submission.csv')
train = pd.read_csv('/kaggle/input/aiacademydeeplearning/train.csv')
num_cols = ['bedrooms', 'bathrooms', 'area', 'zipcode']
target = ['price']
train[num_cols] = train[num_cols].fillna(-99999)
Scaler = StandardScaler()
train[num_cols] = Scaler.fit_transform(train[num_cols])
def load_images(df, inputPath, size, roomType1, roomType2, roomType3, roomType4):
images = []
for i in df['id']:
basePath1 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType1)])
basePath2 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType2)])
basePath3 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType3)])
basePath4 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType4)])
housePaths1 = sorted(list(glob.glob(basePath1)))
housePaths2 = sorted(list(glob.glob(basePath2)))
housePaths3 = sorted(list(glob.glob(basePath3)))
housePaths4 = sorted(list(glob.glob(basePath4)))
for housePath1 in housePaths1:
image1 = cv2.imread(housePath1)
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
image1 = cv2.resize(image1, (size, size))
for housePath2 in housePaths2:
image2 = cv2.imread(housePath2)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
image2 = cv2.resize(image2, (size, size))
for housePath3 in housePaths3:
image3 = cv2.imread(housePath3)
image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2RGB)
image3 = cv2.resize(image3, (size, size))
for housePath4 in housePaths4:
image4 = cv2.imread(housePath4)
image4 = cv2.cvtColor(image4, cv2.COLOR_BGR2RGB)
image4 = cv2.resize(image4, (size, size))
image1_2 = cv2.vconcat([image1, image2])
image3_4 = cv2.vconcat([image3, image4])
image_all = cv2.hconcat([image1_2, image3_4])  # 2x2 mosaic of the four room photos
images.append(image_all)
return np.array(images) / 255.0
inputPath = '/kaggle/input/aiacademydeeplearning/train_images/'
size = 28
roomType1 = 'kitchen'
roomType2 = 'bathroom'
roomType3 = 'bedroom'
roomType4 = 'frontal'
train_images = load_images(train, inputPath, size, roomType1, roomType2, roomType3, roomType4)
display(train_images.shape)
display(train_images[0][0][0])
print(train_images.shape[1]) | code |
34144954/cell_1 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34144954/cell_7 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import cv2
import glob
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
submission_sumple = pd.read_csv('/kaggle/input/aiacademydeeplearning/sample_submission.csv')
train = pd.read_csv('/kaggle/input/aiacademydeeplearning/train.csv')
num_cols = ['bedrooms', 'bathrooms', 'area', 'zipcode']
target = ['price']
train[num_cols] = train[num_cols].fillna(-99999)
Scaler = StandardScaler()
train[num_cols] = Scaler.fit_transform(train[num_cols])
test = pd.read_csv('/kaggle/input/aiacademydeeplearning/test.csv')
test[num_cols] = test[num_cols].fillna(-99999)
Scaler = StandardScaler()
test[num_cols] = Scaler.fit_transform(test[num_cols])
def load_images(df, inputPath, size, roomType1, roomType2, roomType3, roomType4):
images = []
for i in df['id']:
basePath1 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType1)])
basePath2 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType2)])
basePath3 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType3)])
basePath4 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType4)])
housePaths1 = sorted(list(glob.glob(basePath1)))
housePaths2 = sorted(list(glob.glob(basePath2)))
housePaths3 = sorted(list(glob.glob(basePath3)))
housePaths4 = sorted(list(glob.glob(basePath4)))
for housePath1 in housePaths1:
image1 = cv2.imread(housePath1)
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
image1 = cv2.resize(image1, (size, size))
for housePath2 in housePaths2:
image2 = cv2.imread(housePath2)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
image2 = cv2.resize(image2, (size, size))
for housePath3 in housePaths3:
image3 = cv2.imread(housePath3)
image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2RGB)
image3 = cv2.resize(image3, (size, size))
for housePath4 in housePaths4:
image4 = cv2.imread(housePath4)
image4 = cv2.cvtColor(image4, cv2.COLOR_BGR2RGB)
image4 = cv2.resize(image4, (size, size))
image1_2 = cv2.vconcat([image1, image2])
image3_4 = cv2.vconcat([image3, image4])
image_all = cv2.hconcat([image1_2, image3_4])  # 2x2 mosaic of the four room photos
images.append(image_all)
return np.array(images) / 255.0
inputPath = '/kaggle/input/aiacademydeeplearning/train_images/'
size = 28
roomType1 = 'kitchen'
roomType2 = 'bathroom'
roomType3 = 'bedroom'
roomType4 = 'frontal'
train_images = load_images(train, inputPath, size, roomType1, roomType2, roomType3, roomType4)
inputPath_test = '/kaggle/input/aiacademydeeplearning/test_images/'
test_images = load_images(test, inputPath_test, size, roomType1, roomType2, roomType3, roomType4)
display(test_images.shape)
display(test_images[0][0][0]) | code |
34144954/cell_14 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, ReduceLROnPlateau
from tensorflow.keras.layers import BatchNormalization,Activation,Dropout,Dense
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D
import cv2
import glob
import keras
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import os
submission_sumple = pd.read_csv('/kaggle/input/aiacademydeeplearning/sample_submission.csv')
train = pd.read_csv('/kaggle/input/aiacademydeeplearning/train.csv')
num_cols = ['bedrooms', 'bathrooms', 'area', 'zipcode']
target = ['price']
train[num_cols] = train[num_cols].fillna(-99999)
Scaler = StandardScaler()
train[num_cols] = Scaler.fit_transform(train[num_cols])
test = pd.read_csv('/kaggle/input/aiacademydeeplearning/test.csv')
test[num_cols] = test[num_cols].fillna(-99999)
Scaler = StandardScaler()
test[num_cols] = Scaler.fit_transform(test[num_cols])
def load_images(df, inputPath, size, roomType1, roomType2, roomType3, roomType4):
images = []
for i in df['id']:
basePath1 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType1)])
basePath2 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType2)])
basePath3 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType3)])
basePath4 = os.path.sep.join([inputPath, '{}_{}*'.format(i, roomType4)])
housePaths1 = sorted(list(glob.glob(basePath1)))
housePaths2 = sorted(list(glob.glob(basePath2)))
housePaths3 = sorted(list(glob.glob(basePath3)))
housePaths4 = sorted(list(glob.glob(basePath4)))
for housePath1 in housePaths1:
image1 = cv2.imread(housePath1)
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
image1 = cv2.resize(image1, (size, size))
for housePath2 in housePaths2:
image2 = cv2.imread(housePath2)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
image2 = cv2.resize(image2, (size, size))
for housePath3 in housePaths3:
image3 = cv2.imread(housePath3)
image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2RGB)
image3 = cv2.resize(image3, (size, size))
for housePath4 in housePaths4:
image4 = cv2.imread(housePath4)
image4 = cv2.cvtColor(image4, cv2.COLOR_BGR2RGB)
image4 = cv2.resize(image4, (size, size))
image1_2 = cv2.vconcat([image1, image2])
image3_4 = cv2.vconcat([image3, image4])
image_all = cv2.hconcat([image1_2, image3_4])  # 2x2 mosaic of the four room photos
images.append(image_all)
return np.array(images) / 255.0
inputPath = '/kaggle/input/aiacademydeeplearning/train_images/'
size = 28
roomType1 = 'kitchen'
roomType2 = 'bathroom'
roomType3 = 'bedroom'
roomType4 = 'frontal'
train_images = load_images(train, inputPath, size, roomType1, roomType2, roomType3, roomType4)
inputPath_test = '/kaggle/input/aiacademydeeplearning/test_images/'
test_images = load_images(test, inputPath_test, size, roomType1, roomType2, roomType3, roomType4)
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = (np.array(y_true), np.array(y_pred))
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
if int(tf.__version__.split('.')[0]) >= 2:
from tensorflow import keras
else:
import keras
inputs = keras.layers.Input(shape=(size * 2, size * 2, 3))
lay1 = keras.layers.Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal')(inputs)
lay2 = MaxPooling2D(pool_size=(2, 2))(lay1)
lay3 = BatchNormalization()(lay2)
lay4 = Dropout(0.2)(lay3)
lay5 = Conv2D(filters=64, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal')(lay4)
lay6 = MaxPooling2D(pool_size=(2, 2))(lay5)
lay7 = BatchNormalization()(lay6)
lay8 = Dropout(0.2)(lay7)
lay9 = Conv2D(filters=128, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal')(lay8)
lay10 = MaxPooling2D(pool_size=(2, 2))(lay9)
lay11 = BatchNormalization()(lay10)
lay12 = Dropout(0.2)(lay11)
lay13 = Flatten()(lay12)
lay14 = Dense(units=256, activation='relu', kernel_initializer='he_normal')(lay13)
inputs_mlp = keras.layers.Input(shape=(4,))
lay1_mlp = Dense(units=512, input_shape=(len(num_cols),), kernel_initializer='he_normal', activation='relu')(inputs_mlp)
lay2_mlp = Dropout(0.2)(lay1_mlp)
lay3_mlp = Dense(units=256, kernel_initializer='he_normal', activation='relu')(lay2_mlp)
lay4_mlp = Dropout(0.2)(lay3_mlp)
merged = keras.layers.concatenate([lay14, lay4_mlp])
lay15 = Dense(units=32, activation='relu', kernel_initializer='he_normal')(merged)
lay16 = Dense(units=1, activation='linear')(lay15)
model = keras.Model(inputs=[inputs, inputs_mlp], outputs=lay16)
model.compile(loss='mape', optimizer='adam', metrics=['mape'])
filepath = 'cnn_best_model.hdf5'
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=filepath, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
n = 5
y_pred = np.zeros(len(test))
mape_scores = []
for i in range(n):
train_x, valid_x, train_images_x, valid_images_x = train_test_split(train, train_images, test_size=0.2, random_state=i * 10)
train_y = train_x['price'].values
valid_y = valid_x['price'].values
train_table, valid_table = train_test_split(train, test_size=0.2, random_state=i * 10)
train_f, train_t = (train_table[num_cols].values, train_table[target].values)
valid_f, valid_t = (valid_table[num_cols].values, valid_table[target].values)
model.fit([train_images_x, train_f], train_y, validation_data=([valid_images_x, valid_f], valid_y), epochs=50, batch_size=16, callbacks=[es, checkpoint, reduce_lr_loss])
model.load_weights(filepath)
valid_pred = model.predict([valid_images_x, valid_f], batch_size=32).reshape((-1, 1))
mape_score = mean_absolute_percentage_error(valid_y, valid_pred)
mape_scores.append(mape_score)
test_pred = model.predict([test_images, test[num_cols].values], batch_size=32).reshape((-1, 1))
y_pred += test_pred.reshape([len(test)])
ykai = y_pred / n
final = ykai
df = pd.DataFrame(final, columns=['price'])
submission_sumple2 = submission_sumple.drop(['price'], axis=1)
kai = pd.concat([submission_sumple2, df], axis=1)
kai.to_csv('submission.csv', index=False)
kai | code |
34144954/cell_5 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
submission_sumple = pd.read_csv('/kaggle/input/aiacademydeeplearning/sample_submission.csv')
train = pd.read_csv('/kaggle/input/aiacademydeeplearning/train.csv')
num_cols = ['bedrooms', 'bathrooms', 'area', 'zipcode']
target = ['price']
train[num_cols] = train[num_cols].fillna(-99999)
Scaler = StandardScaler()
train[num_cols] = Scaler.fit_transform(train[num_cols])
test = pd.read_csv('/kaggle/input/aiacademydeeplearning/test.csv')
test[num_cols] = test[num_cols].fillna(-99999)
Scaler = StandardScaler()
test[num_cols] = Scaler.fit_transform(test[num_cols])
display(test.shape)
display(test.head()) | code |
16133160/cell_6 | [
"image_output_1.png"
] | import os
import os
os.listdir('../input') | code |
16133160/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from html.parser import HTMLParser
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from wordcloud import WordCloud, STOPWORDS
import collections
import gensim
import nltk
import pandas as pd
import pandas as pd
import re
import scipy.cluster.hierarchy as shc
import unicodedata
import unicodedata
from html.parser import HTMLParser
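# Minimal HTMLParser subclass that keeps only the visible text of a product page,
# dropping tags and control characters.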
class HTMLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._lines = []
def error(self, message):
pass
@staticmethod
def remove_control_characters(s):
return ''.join((ch for ch in s if unicodedata.category(ch)[0] != 'C'))
def read(self, data):
self.reset()
self.feed(data)
return ' '.join(self._lines)
def handle_data(self, data):
data = self.remove_control_characters(data)
data = data.strip()
self._lines.append(data)
import pandas as pd
class ImportData:
@staticmethod
def read_data(file_name):
data = pd.read_csv(file_name)
try:
data = data.drop(['Unnamed: 0'], axis=1)
except KeyError:
pass
return data
@staticmethod
def handle_null_values(data):
data.drop(columns=['product_meta_keywords'], inplace=True)
data.dropna(axis=0, inplace=True)
return data
def import_data(self, csv_file):
data = self.read_data(csv_file)
data = self.handle_null_values(data)
return data
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
class Preprocess:
def __init__(self):
nltk.download('stopwords')
nltk.download('wordnet')
self.STOPWORDS = set(stopwords.words('english'))
self.STRIP_PUNCTUATION = re.compile('[",\\(\\)\\|!\\?;:]')
self.STRIP_PERIODS = re.compile('\\.(?!\\d)')
@staticmethod
def strip_tags(html):
s = HTMLStripper()
return s.read(html)
def normalize(self, text):
text = text.lower()
text = self.STRIP_PUNCTUATION.sub('', text)
text = self.STRIP_PERIODS.sub('', text)
# drop stopwords and lemmatize token by token before rejoining
text = text.split(' ')
text = [word for word in text if word not in self.STOPWORDS]
text = [WordNetLemmatizer().lemmatize(word) for word in text]
text = ' '.join(text)
text = text.strip()
text = re.sub(' +', ' ', text)
return text
def process(self, html):
text = self.strip_tags(html)
text = self.normalize(text)
return text
from wordcloud import WordCloud, STOPWORDS
from matplotlib import pyplot as plt
class Visualize:
def __init__(self, width=500, height=500, bg_color='white', font_size=15, stopwords=STOPWORDS, text=''):
self.width = width
self.height = height
self.bg_color = bg_color
self.font_size = font_size
self.stopwords = stopwords
self.text = text
self.wordcloud = WordCloud(width=self.width, height=self.height, background_color=self.bg_color, stopwords=self.stopwords, min_font_size=self.font_size)
def plot(self):
wordcloud = self.wordcloud.generate(self.text)
plt.axis('off')
plt.tight_layout(pad=0)
import gensim
import pandas as pd
import collections
from nltk.tokenize import word_tokenize
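# Wrapper around gensim's Doc2Vec: builds tagged train/test corpora from the product CSV, trains a
# model and runs a self-similarity sanity check; the class is defined but not instantiated in this
# cell, which builds its model directly via TaggedDoc below.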
class Doc2Vec:
def __init__(self, filename='../input/Product_Details.csv'):
self.data = pd.read_csv(filename)
self.train_corpus = self.get_train_data()
self.test_corpus = self.get_test_data()
def read_corpus(self, tokens_only=False):
for i, row in enumerate(self.data):
if tokens_only:
yield word_tokenize(row)
else:
yield gensim.models.doc2vec.TaggedDocument(word_tokenize(row), [i])
def get_train_data(self):
train_corpus = list(self.read_corpus())
return train_corpus
def get_test_data(self):
test_corpus = list(self.read_corpus(tokens_only=True))
return test_corpus
def train_model(self, epochs=40, vector_size=50, min_count=2, workers=4, save_model=True):
model = gensim.models.doc2vec.Doc2Vec(vector_size=vector_size, min_count=min_count, epochs=epochs, workers=workers)
model.build_vocab(self.train_corpus)
model.train(self.train_corpus, total_examples=model.corpus_count, epochs=model.epochs)
if save_model:
name = 'doc2vec.model'
model.save(name)
return model
def test_model(self):
model = self.train_model()
ranks = []
second_ranks = []
for doc_id in range(len(self.train_corpus)):
inferred_vector = model.infer_vector(self.train_corpus[doc_id].words)
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
rank = [docid for docid, sim in sims].index(doc_id)
ranks.append(rank)
second_ranks.append(sims[1])
collections.Counter(ranks)
def export_vsm_vocab(self, model):
document_vector_list = []
for i in range(self.data.shape[0]):
document_vector_list.append(model.docvecs[i])
return document_vector_list
data_importer = ImportData()
data = data_importer.import_data('../input/qiagen-detail/Qiagen_details.csv')
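# Wrap each product page as a gensim TaggedDocument (lower-cased, whitespace-tokenised), using the
# row index as the document tag.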
def TaggedDoc(dataframe):
global punct
tagdoc = []
for i in range(len(dataframe)):
x = dataframe.iloc[i]
y = gensim.models.doc2vec.TaggedDocument(x.lower().split(), [i])
tagdoc.append(y)
return tagdoc
df_desc = TaggedDoc(data['product_html'])
model = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=5, epochs=30, dm=1, workers=4, dbow_words=0)
model.build_vocab(df_desc)
len(model.wv.vocab)
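# Ward-linkage hierarchical clustering of the learned document vectors, visualised as a dendrogram.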
import scipy.cluster.hierarchy as shc
plt.figure(figsize=(10, 8))
plt.title('Dendrograms')
dend = shc.dendrogram(shc.linkage(model.docvecs.vectors_docs, method='ward')) | code |
16133160/cell_8 | [
"text_plain_output_1.png"
] | from html.parser import HTMLParser
import pandas as pd
import pandas as pd
import unicodedata
import unicodedata
from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._lines = []
def error(self, message):
pass
@staticmethod
def remove_control_characters(s):
return ''.join((ch for ch in s if unicodedata.category(ch)[0] != 'C'))
def read(self, data):
self.reset()
self.feed(data)
return ' '.join(self._lines)
def handle_data(self, data):
data = self.remove_control_characters(data)
data = data.strip()
self._lines.append(data)
import pandas as pd
class ImportData:
@staticmethod
def read_data(file_name):
data = pd.read_csv(file_name)
try:
data = data.drop(['Unnamed: 0'], axis=1)
except KeyError:
pass
return data
@staticmethod
def handle_null_values(data):
data.drop(columns=['product_meta_keywords'], inplace=True)
data.dropna(axis=0, inplace=True)
return data
def import_data(self, csv_file):
data = self.read_data(csv_file)
data = self.handle_null_values(data)
return data
data_importer = ImportData()
data = data_importer.import_data('../input/qiagen-detail/Qiagen_details.csv')
data.head() | code |
16133160/cell_15 | [
"text_html_output_1.png"
] | from html.parser import HTMLParser
import pandas as pd
import pandas as pd
import unicodedata
import unicodedata
from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._lines = []
def error(self, message):
pass
@staticmethod
def remove_control_characters(s):
return ''.join((ch for ch in s if unicodedata.category(ch)[0] != 'C'))
def read(self, data):
self.reset()
self.feed(data)
return ' '.join(self._lines)
def handle_data(self, data):
data = self.remove_control_characters(data)
data = data.strip()
self._lines.append(data)
import pandas as pd
class ImportData:
@staticmethod
def read_data(file_name):
data = pd.read_csv(file_name)
try:
data = data.drop(['Unnamed: 0'], axis=1)
except KeyError:
pass
return data
@staticmethod
def handle_null_values(data):
data.drop(columns=['product_meta_keywords'], inplace=True)
data.dropna(axis=0, inplace=True)
return data
def import_data(self, csv_file):
data = self.read_data(csv_file)
data = self.handle_null_values(data)
return data
data_importer = ImportData()
data = data_importer.import_data('../input/qiagen-detail/Qiagen_details.csv')
len(data['product_category']) | code |
16133160/cell_16 | [
"text_plain_output_1.png"
] | len(ist) | code |
16133160/cell_24 | [
"text_plain_output_1.png"
] |  | code |
16133160/cell_14 | [
"text_plain_output_1.png"
] | from html.parser import HTMLParser
import pandas as pd
import pandas as pd
import unicodedata
import unicodedata
from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._lines = []
def error(self, message):
pass
@staticmethod
def remove_control_characters(s):
return ''.join((ch for ch in s if unicodedata.category(ch)[0] != 'C'))
def read(self, data):
self.reset()
self.feed(data)
return ' '.join(self._lines)
def handle_data(self, data):
data = self.remove_control_characters(data)
data = data.strip()
self._lines.append(data)
import pandas as pd
class ImportData:
@staticmethod
def read_data(file_name):
data = pd.read_csv(file_name)
try:
data = data.drop(['Unnamed: 0'], axis=1)
except KeyError:
pass
return data
@staticmethod
def handle_null_values(data):
data.drop(columns=['product_meta_keywords'], inplace=True)
data.dropna(axis=0, inplace=True)
return data
def import_data(self, csv_file):
data = self.read_data(csv_file)
data = self.handle_null_values(data)
return data
data_importer = ImportData()
data = data_importer.import_data('../input/qiagen-detail/Qiagen_details.csv')
len(data['product_html']) | code |
16133160/cell_22 | [
"text_plain_output_1.png"
] | from html.parser import HTMLParser
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from wordcloud import WordCloud, STOPWORDS
import collections
import gensim
import nltk
import pandas as pd
import pandas as pd
import re
import unicodedata
import unicodedata
from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._lines = []
def error(self, message):
pass
@staticmethod
def remove_control_characters(s):
return ''.join((ch for ch in s if unicodedata.category(ch)[0] != 'C'))
def read(self, data):
self.reset()
self.feed(data)
return ' '.join(self._lines)
def handle_data(self, data):
data = self.remove_control_characters(data)
data = data.strip()
self._lines.append(data)
import pandas as pd
class ImportData:
@staticmethod
def read_data(file_name):
data = pd.read_csv(file_name)
try:
data = data.drop(['Unnamed: 0'], axis=1)
except KeyError:
pass
return data
@staticmethod
def handle_null_values(data):
data.drop(columns=['product_meta_keywords'], inplace=True)
data.dropna(axis=0, inplace=True)
return data
def import_data(self, csv_file):
data = self.read_data(csv_file)
data = self.handle_null_values(data)
return data
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
class Preprocess:
def __init__(self):
nltk.download('stopwords')
self.STOPWORDS = set(stopwords.words('english'))
self.STRIP_PUNCTUATION = re.compile('[",\\(\\)\\|!\\?;:]')
self.STRIP_PERIODS = re.compile('\\.(?!\\d)')
@staticmethod
def strip_tags(html):
s = HTMLStripper()
return s.read(html)
def normalize(self, text):
text = text.lower()
text = self.STRIP_PUNCTUATION.sub('', text)
text = self.STRIP_PERIODS.sub('', text)
text = text.split(' ')
text = [word for word in text if word not in self.STOPWORDS]
text = [WordNetLemmatizer().lemmatize(word) for word in text]
text = ' '.join(text)
text = text.strip()
text = re.sub(' +', ' ', text)
return text
def process(self, html):
text = self.strip_tags(html)
text = self.normalize(text)
return text
from wordcloud import WordCloud, STOPWORDS
from matplotlib import pyplot as plt
class Visualize:
def __init__(self, width=500, height=500, bg_color='white', font_size=15, stopwords=STOPWORDS, text=''):
self.width = width
self.height = height
self.bg_color = bg_color
self.font_size = font_size
self.stopwords = stopwords
self.text = text
self.wordcloud = WordCloud(width=self.width, height=self.height, background_color=self.bg_color, stopwords=self.stopwords, min_font_size=self.font_size)
def plot(self):
wordcloud = self.wordcloud.generate(self.text)
plt.axis('off')
plt.tight_layout(pad=0)
import gensim
import pandas as pd
import collections
from nltk.tokenize import word_tokenize
class Doc2Vec:
def __init__(self, filename='../input/Product_Details.csv'):
self.data = pd.read_csv(filename)
self.train_corpus = self.get_train_data()
self.test_corpus = self.get_test_data()
def read_corpus(self, tokens_only=False):
for i, row in enumerate(self.data):
if tokens_only:
yield word_tokenize(row)
else:
yield gensim.models.doc2vec.TaggedDocument(word_tokenize(row), [i])
def get_train_data(self):
train_corpus = list(self.read_corpus())
return train_corpus
def get_test_data(self):
test_corpus = list(self.read_corpus(tokens_only=True))
return test_corpus
def train_model(self, epochs=40, vector_size=50, min_count=2, workers=4, save_model=True):
model = gensim.models.doc2vec.Doc2Vec(vector_size=vector_size, min_count=min_count, epochs=epochs, workers=workers)
model.build_vocab(self.train_corpus)
model.train(self.train_corpus, total_examples=model.corpus_count, epochs=model.epochs)
if save_model:
name = 'doc2vec.model'
model.save(name)
return model
def test_model(self):
model = self.train_model()
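# Sanity check: re-infer each training document's vector and record where the document ranks among its own most-similar matches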
ranks = []
second_ranks = []
for doc_id in range(len(self.train_corpus)):
inferred_vector = model.infer_vector(self.train_corpus[doc_id].words)
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
rank = [docid for docid, sim in sims].index(doc_id)
ranks.append(rank)
second_ranks.append(sims[1])
collections.Counter(ranks)
def export_vsm_vocab(self, model):
document_vector_list = []
for i in range(self.data.shape[0]):
document_vector_list.append(model.docvecs[i])
return document_vector_list
data_importer = ImportData()
data = data_importer.import_data('../input/qiagen-detail/Qiagen_details.csv')
def TaggedDoc(dataframe):
global punct
tagdoc = []
for i in range(len(dataframe)):
x = dataframe.iloc[i]
y = gensim.models.doc2vec.TaggedDocument(x.lower().split(), [i])
tagdoc.append(y)
return tagdoc
df_desc = TaggedDoc(data['product_html'])
model = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=5, epochs=30, dm=1, workers=4, dbow_words=0)
model.build_vocab(df_desc)
len(model.wv.vocab) | code |
16133160/cell_10 | [
"text_plain_output_1.png"
] | data_processor = Preprocess() | code |
16133160/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.cluster import AgglomerativeClustering
cluster = AgglomerativeClustering(n_clusters=6, affinity='euclidean', linkage='ward') | code |
16133160/cell_5 | [
"text_plain_output_1.png"
] | !pip install paramiko | code |
34127100/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
sns.pairplot(train_df.drop(['PassengerId', 'Parch', 'SibSp'], axis=1), hue='Survived') | code |
34127100/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
ar_of_modes = np.array(train_df[categorical_col].mode())
val = ar_of_modes[0]
val | code |
34127100/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.info() | code |
34127100/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
sns.countplot(x='Survived', hue='Sex', data=train_df) | code |
34127100/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
print('X_train', len(X_train))
print('X_test', len(X_test))
print('y_train', len(y_train))
print('y_test', len(y_test))
print('test', len(test_data)) | code |
34127100/cell_39 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
lr_score = classifier.score(X_test, y_test)
predictions = classifier.predict(X_test)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, predictions) | code |
34127100/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
train_df.corr() | code |
34127100/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
sns.heatmap(train_df.isna()) | code |
34127100/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34127100/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.heatmap(train_data.isnull()) | code |
34127100/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
print(train_df.isnull().sum()) | code |
34127100/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
train_df.corr()
cor = train_df.corr()
cor_target = abs(cor['Survived'])
print(cor_target.sort_values()) | code |
34127100/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum() | code |
34127100/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum() | code |
34127100/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.head(3) | code |
34127100/cell_35 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train) | code |
34127100/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
sns.countplot(x='Survived', hue='Embarked', data=train_df) | code |
34127100/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
sns.countplot(train_df['Sex']) | code |
34127100/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col
train_df.isnull().sum()
train_df.corr()
plt.figure(figsize=(20, 18))
cor = train_df.corr()
sns.heatmap(cor, annot=True)
plt.show() | code |
34127100/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
lr_score = classifier.score(X_test, y_test)
predictions = classifier.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions)) | code |
34127100/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
train_df = train_data.drop('Cabin', axis=1)
categorical_col = train_df.select_dtypes(include=['object']).columns
categorical_col | code |
34127100/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
lr_score = classifier.score(X_test, y_test)
print(lr_score) | code |
49120184/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
train.groupby('Embarked').size()
test.groupby('Embarked').size()
train.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
test.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
train = train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
test = test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
train = train.astype(float)
test = test.astype(float)
train.dtypes
test.dtypes | code |
49120184/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier()
clf.fit(X_train, y_train)
print('Training score', clf.score(X_train, y_train))
print('Test score', clf.score(X_val, y_val))
49120184/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum() | code |
49120184/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
#Visualize the five-level rating from earlier
f, ax = plt.subplots()
train['Age'].value_counts().plot.bar()
train.groupby('Embarked').size()
test.groupby('Embarked').size()
train.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
test.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
train = train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
test = test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
train = train.astype(float)
test = test.astype(float)
train.dtypes
test.dtypes
colormap = plt.cm.RdBu
plt.figure(figsize=(14, 12))
sns.heatmap(train.corr(), linewidths=0.1, cmap=colormap, linecolor='white', annot=True) | code |
49120184/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import lightgbm as lgb
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
#Visualize the five-level rating from earlier
f, ax = plt.subplots()
train['Age'].value_counts().plot.bar()
train.groupby('Embarked').size()
test.groupby('Embarked').size()
train.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
test.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
train = train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
test = test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
train = train.astype(float)
test = test.astype(float)
train.dtypes
test.dtypes
colormap = plt.cm.RdBu
X = train.drop(columns=['Survived'])
y = train['Survived']
X_test = test
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, random_state=0)
import sklearn
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(criterion='entropy', max_depth=5, random_state=0)
model.fit(X_train, y_train)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_train, y_train)
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
model = lgb.LGBMClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_val)
y_pred_prob = model.predict_proba(X_val)
acc = accuracy_score(y_val, y_pred)
logloss = log_loss(y_val, y_pred_prob)
auc = roc_auc_score(y_val, y_pred_prob[:, 1])
y_pred_1 = model.predict(X_test)
print(y_pred_1.shape) | code |
49120184/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
train.head() | code |
49120184/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import lightgbm as lgb
import sklearn
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(criterion='entropy', max_depth=5, random_state=0)
model.fit(X_train, y_train)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_train, y_train)
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
model = lgb.LGBMClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_val)
y_pred_prob = model.predict_proba(X_val)
print(model.score(X_train, y_train))
print(model.score(X_val, y_val))
acc = accuracy_score(y_val, y_pred)
print('Acc :', acc)
logloss = log_loss(y_val, y_pred_prob)
print('logloss :', logloss)
auc = roc_auc_score(y_val, y_pred_prob[:, 1])
print('AUC :', auc) | code |
49120184/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import sklearn
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
print('train score:', model.score(X_train, y_train))
print('test score:', model.score(X_val, y_val)) | code |
49120184/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
train.groupby('Embarked').size()
test.groupby('Embarked').size()
train.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
test.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
print(train.shape)
print(test.shape) | code |
49120184/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
49120184/cell_32 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from xgboost import XGBClassifier
from xgboost import XGBClassifier
xgb = XGBClassifier(objective='binary:logistic')
xgb.fit(X_train, y_train)
pred = xgb.predict(X_val)
from imblearn.over_sampling import SMOTE
method = SMOTE()
X_resampled, y_resampled = method.fit_sample(X_train, y_train)
xgb.fit(X_resampled, y_resampled)
pred1 = xgb.predict(X_val)
print('Train Score: ', xgb.score(X_resampled, y_resampled))
print('Test Score: ', xgb.score(X_val, y_val)) | code |
49120184/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import sklearn
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(criterion='entropy', max_depth=5, random_state=0)
model.fit(X_train, y_train)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_train, y_train)
print('Accuracy (train): {:.3f}'.format(model.score(X_train, y_train)))
print('Accuracy (test): {:.3f}'.format(model.score(X_val, y_val)))
49120184/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
train.groupby('Embarked').size() | code |
49120184/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.groupby('Embarked').size() | code |
49120184/cell_31 | [
"text_plain_output_1.png"
] | from xgboost import XGBClassifier
from xgboost import XGBClassifier
xgb = XGBClassifier(objective='binary:logistic')
xgb.fit(X_train, y_train)
pred = xgb.predict(X_val)
print(xgb.score(X_train, y_train))
print(xgb.score(X_val, y_val)) | code |
49120184/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
train.groupby('Embarked').size()
test.groupby('Embarked').size()
train.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
test.replace({'Embarked': {'C': 0, 'Q': 1, 'S': 2}}, inplace=True)
train = train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
test = test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
train = train.astype(float)
test = test.astype(float)
train.dtypes
test.dtypes
print(train.shape)
print(test.shape) | code |
49120184/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.isnull().sum()
test.isnull().sum()
train.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
test.replace({'Sex': {'male': 0, 'female': 1}}, inplace=True)
f, ax = plt.subplots()
train['Age'].value_counts().plot.bar() | code |
49120184/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import sklearn
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(criterion='entropy', max_depth=5, random_state=0)
model.fit(X_train, y_train)
print('Accuracy (train): {:.3f}'.format(model.score(X_train, y_train)))
print('Accuracy (test): {:.3f}'.format(model.score(X_val, y_val)))
49120184/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.isnull().sum() | code |
73061601/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0, 11)
x = 0.85 ** t
plt.figure(figsize=(12, 12))
plt.subplot(2, 2, 1)
plt.title('Analog Signal', fontsize=20)
plt.plot(t, x, linewidth=3, label='x(t) = (0.85)^t')
plt.xlabel('t', fontsize=15)
plt.ylabel('amplitude', fontsize=15)
plt.legend()
plt.subplot(2, 2, 2)
plt.title('Sampling', fontsize=20)
plt.plot(t, x, linewidth=3, label='x(t) = (0.85)^t')
n = t
markerline, stemlines, baseline = plt.stem(n, x, label='x(n) = (0.85)^n')
plt.setp(stemlines, 'linewidth', 3)
plt.xlabel('n', fontsize=15)
plt.ylabel('amplitude', fontsize=15)
plt.legend()
plt.subplot(2, 2, 3)
plt.title('Quantization', fontsize=20)
plt.plot(t, x, linewidth=3)
markerline, stemlines, baseline = plt.stem(n, x)
plt.setp(stemlines, 'linewidth', 3)
plt.xlabel('n', fontsize=15)
plt.ylabel('Range of Quantizer', fontsize=15)
plt.axhline(y=0.1, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.2, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.3, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.4, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.5, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.6, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.7, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.8, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.9, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=1.0, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.subplot(2, 2, 4)
plt.title('Quantized Signal', fontsize=20)
xq = np.around(x, 1)
markerline, stemlines, baseline = plt.stem(n, xq)
plt.setp(stemlines, 'linewidth', 3)
plt.xlabel('n', fontsize=15)
plt.ylabel('Range of Quantizer', fontsize=15)
plt.axhline(y=0.1, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.2, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.3, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.4, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.5, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.6, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.7, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.8, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=0.9, xmin=0, xmax=10, color='r', linewidth=3.0)
plt.axhline(y=1.0, xmin=0, xmax=10, color='r', linewidth=3.0) | code |
128024272/cell_12 | [
"text_plain_output_1.png"
] | import tensorflow as tf
import tensorflow as tf
def yolov1(input_shape, num_classes):
model = tf.keras.models.Sequential()
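# Convolutional backbone: stacked Conv2D + LeakyReLU(0.1) blocks with max-pooling between stages, following the YOLOv1 layer layout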
model.add(tf.keras.layers.Conv2D(64, (7, 7), strides=(2, 2), padding='same', input_shape=input_shape))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.Conv2D(192, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.Conv2D(128, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(256, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(256, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.Conv2D(256, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(256, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(256, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(256, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.Conv2D(512, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(512, (1, 1), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same'))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(4096))
model.add(tf.keras.layers.LeakyReLU(alpha=0.1))
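# Output head: 7 * 7 grid cells, each predicting num_classes class scores plus 5 box values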
model.add(tf.keras.layers.Dense(7 * 7 * (num_classes + 5)))
return model
input_shape = (448, 448, 3)
num_classes = 20
yolo_model = yolov1(input_shape, num_classes)
yolo_model.summary() | code |
1005801/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import tensorflow as tf
import random
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1005801/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
C = pd.concat([df, t], axis=1)
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
y_train = df['Survived'].values
x_train = df[features].values
print(len(x_train)) | code |
88098005/cell_4 | [
"text_html_output_1.png"
] | ! pip install -q git+https://github.com/tensorflow/docs | code |
88098005/cell_33 | [
"text_plain_output_1.png"
] | from IPython.display import HTML, display
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
KEYPOINT_EDGE_INDS_TO_COLOR = {(0, 1): 'm', (0, 2): 'c', (1, 3): 'm', (2, 4): 'c', (0, 5): 'm', (0, 6): 'c', (5, 7): 'm', (7, 9): 'm', (6, 8): 'c', (8, 10): 'c', (5, 6): 'y', (5, 11): 'm', (6, 12): 'c', (11, 12): 'y', (11, 13): 'm', (13, 15): 'm', (12, 14): 'c', (14, 16): 'c'}
model = hub.load('https://tfhub.dev/google/movenet/multipose/lightning/1')
movenet = model.signatures['serving_default']
def loop(frame, keypoints, threshold=0.11):
pass
def draw_keypoints(frame, keypoints, threshold=0.11):
width, height, _ = frame.shape
shaped = np.squeeze(np.multiply(keypoints, [width, height, 1]))
for kp in shaped:
ky, kx, kp_conf = kp
if kp_conf > threshold:
cv2.circle(frame, (int(kx), int(ky)), 4, (255, 0, 0), -1)
def draw_edges(frame, keypoints, edges, threshold=0.11):
y, x, c = frame.shape
shaped = np.squeeze(np.multiply(keypoints, [y, x, 1]))
for edge, color in edges.items():
p1, p2 = edge
y1, x1, c1 = shaped[p1]
y2, x2, c2 = shaped[p2]
if (c1 > threshold) & (c2 > threshold):
cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 4)
def progress(value, max=100):
return HTML("\n <progress\n value='{value}'\n max='{max}',\n style='width: 100%'\n >\n {value}\n </progress>\n ".format(value=value, max=max))
gif = cv2.VideoCapture('./ngannou.gif')
frame_count = int(gif.get(cv2.CAP_PROP_FRAME_COUNT))
output_frames = []
def run_inference():
#Set the progress bar to 0. It ranges from the first to the last frame
bar = display(progress(0, frame_count-1), display_id=True)
while gif.isOpened():
#Capture the frame
ret, frame = gif.read()
#Process the frame : resize to the input size
if frame is None:
break
#Retrieve the frame index
index = gif.get(cv2.CAP_PROP_POS_FRAMES)
image = frame.copy()
image = tf.cast(tf.image.resize_with_pad(image, 256, 256), dtype=tf.int32)
input_image = tf.expand_dims(image, axis=0)
#Perform inference
results = movenet(input_image)
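#output_0 is a [1, 6, 56] tensor: up to 6 people, each with 17 keypoints * (y, x, score) = 51 values; the remaining 5 values describe the person bounding box and are dropped by the slice below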
keypoints = results['output_0'].numpy()[:,:,:51].reshape((6,17,3))
#Loop through the results
loop(frame, keypoints, threshold=0.11)
#Add the drawings to the output frames
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) #OpenCV processes BGR images instead of RGB
output_frames.append(frame_rgb)
#Update the progress bar
bar.update(progress(index, frame_count-1))
#Release the object
gif.release()
run_inference() | code |
88098005/cell_28 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np
def draw_keypoints(frame, keypoints, threshold=0.11):
width, height, _ = frame.shape
shaped = np.squeeze(np.multiply(keypoints, [width, height, 1]))
for kp in shaped:
ky, kx, kp_conf = kp
if kp_conf > threshold:
cv2.circle(frame, (int(kx), int(ky)), 4, (255, 0, 0), -1)
def draw_edges(frame, keypoints, edges, threshold=0.11):
y, x, c = frame.shape
shaped = np.squeeze(np.multiply(keypoints, [y, x, 1]))
for edge, color in edges.items():
p1, p2 = edge
y1, x1, c1 = shaped[p1]
y2, x2, c2 = shaped[p2]
if (c1 > threshold) & (c2 > threshold):
cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 4)
gif = cv2.VideoCapture('./ngannou.gif')
frame_count = int(gif.get(cv2.CAP_PROP_FRAME_COUNT))
print(f'Frame count: {frame_count}') | code |
88098005/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | ! wget -O ngannou.gif https://raw.githubusercontent.com/Justsecret123/Human-pose-estimation/main/Test%20gifs/Ngannou_takedown.gif | code |
88098005/cell_12 | [
"text_plain_output_1.png"
] | import tensorflow_hub as hub
model = hub.load('https://tfhub.dev/google/movenet/multipose/lightning/1')
movenet = model.signatures['serving_default'] | code |
88098005/cell_36 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from IPython.display import HTML, display
from tensorflow_docs.vis import embed
import cv2
import imageio
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
KEYPOINT_EDGE_INDS_TO_COLOR = {(0, 1): 'm', (0, 2): 'c', (1, 3): 'm', (2, 4): 'c', (0, 5): 'm', (0, 6): 'c', (5, 7): 'm', (7, 9): 'm', (6, 8): 'c', (8, 10): 'c', (5, 6): 'y', (5, 11): 'm', (6, 12): 'c', (11, 12): 'y', (11, 13): 'm', (13, 15): 'm', (12, 14): 'c', (14, 16): 'c'}
model = hub.load('https://tfhub.dev/google/movenet/multipose/lightning/1')
movenet = model.signatures['serving_default']
def loop(frame, keypoints, threshold=0.11):
pass
def draw_keypoints(frame, keypoints, threshold=0.11):
width, height, _ = frame.shape
shaped = np.squeeze(np.multiply(keypoints, [width, height, 1]))
for kp in shaped:
ky, kx, kp_conf = kp
if kp_conf > threshold:
cv2.circle(frame, (int(kx), int(ky)), 4, (255, 0, 0), -1)
def draw_edges(frame, keypoints, edges, threshold=0.11):
y, x, c = frame.shape
shaped = np.squeeze(np.multiply(keypoints, [y, x, 1]))
for edge, color in edges.items():
p1, p2 = edge
y1, x1, c1 = shaped[p1]
y2, x2, c2 = shaped[p2]
if (c1 > threshold) & (c2 > threshold):
cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 4)
def progress(value, max=100):
return HTML("\n <progress\n value='{value}'\n max='{max}',\n style='width: 100%'\n >\n {value}\n </progress>\n ".format(value=value, max=max))
gif = cv2.VideoCapture('./ngannou.gif')
frame_count = int(gif.get(cv2.CAP_PROP_FRAME_COUNT))
output_frames = []
def run_inference():
#Set the progress bar to 0. It ranges from the first to the last frame
bar = display(progress(0, frame_count-1), display_id=True)
while gif.isOpened():
#Capture the frame
ret, frame = gif.read()
#Process the frame : resize to the input size
if frame is None:
break
#Retrieve the frame index
index = gif.get(cv2.CAP_PROP_POS_FRAMES)
image = frame.copy()
image = tf.cast(tf.image.resize_with_pad(image, 256, 256), dtype=tf.int32)
input_image = tf.expand_dims(image, axis=0)
#Perform inference
results = movenet(input_image)
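#output_0 is a [1, 6, 56] tensor: up to 6 people, each with 17 keypoints * (y, x, score) = 51 values; the remaining 5 values describe the person bounding box and are dropped by the slice below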
keypoints = results['output_0'].numpy()[:,:,:51].reshape((6,17,3))
#Loop through the results
loop(frame, keypoints, threshold=0.11)
#Add the drawings to the output frames
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) #OpenCV processes BGR images instead of RGB
output_frames.append(frame_rgb)
#Update the progress bar
bar.update(progress(index, frame_count-1))
#Release the object
gif.release()
output = np.stack(output_frames, axis=0)
imageio.mimsave('./animation.gif', output, fps=10)
embed.embed_file('./animation.gif') | code |
16116561/cell_13 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score,recall_score,f1_score,roc_auc_score,roc_curve
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
y.value_counts()
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=0)
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
rnd_clf = RandomForestClassifier(random_state=100)
param_grid = {'n_estimators': [100, 150], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
CV_rfc = GridSearchCV(estimator=rnd_clf, param_grid=param_grid, cv=5)
rnd_cv_fit = CV_rfc.fit(X_train_res, y_train_res)
rnd = RandomForestClassifier(random_state=100, n_estimators=150, criterion='gini', max_depth=8, max_features='log2')
rnd_fit = rnd_clf.fit(X_train_res, y_train_res)
y_test_fit = rnd_fit.predict(X_test)
roc_curve(y_test, y_test_fit)
fpr, tpr, threshold = roc_curve(y_test, y_test_fit)
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show() | code |
16116561/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
pd.value_counts(y).plot.bar()
plt.title('Data on star detection')
plt.xlabel('Class')
plt.ylabel('Frequency')
y.value_counts() | code |
16116561/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
y.value_counts()
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=0)
print('Number transactions X_train dataset: ', X_train.shape)
print('Number transactions y_train dataset: ', y_train.shape)
print('Number transactions X_test dataset: ', X_test.shape)
print('Number transactions y_test dataset: ', y_test.shape) | code |
16116561/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
y.value_counts()
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=0)
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
rnd_clf = RandomForestClassifier(random_state=100)
param_grid = {'n_estimators': [100, 150], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
CV_rfc = GridSearchCV(estimator=rnd_clf, param_grid=param_grid, cv=5)
rnd_cv_fit = CV_rfc.fit(X_train_res, y_train_res)
rnd = RandomForestClassifier(random_state=100, n_estimators=150, criterion='gini', max_depth=8, max_features='log2')
rnd_fit = rnd_clf.fit(X_train_res, y_train_res)
y_test_fit = rnd_fit.predict(X_test) | code |
16116561/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, roc_curve
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from imblearn.over_sampling import SMOTE
import matplotlib.pyplot as plt | code |
16116561/cell_8 | [
"image_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
y.value_counts()
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=0)
print("Before OverSampling, counts of label '1': {}".format(sum(y_train == 1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train == 0)))
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
print('After OverSampling, the shape of train_X: {}'.format(X_train_res.shape))
print('After OverSampling, the shape of train_y: {} \n'.format(y_train_res.shape))
print("After OverSampling, counts of label '1': {}".format(sum(y_train_res == 1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train_res == 0))) | code |
16116561/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape | code |
16116561/cell_10 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
y.value_counts()
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=0)
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
rnd_clf = RandomForestClassifier(random_state=100)
param_grid = {'n_estimators': [100, 150], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
CV_rfc = GridSearchCV(estimator=rnd_clf, param_grid=param_grid, cv=5)
rnd_cv_fit = CV_rfc.fit(X_train_res, y_train_res)
CV_rfc.best_params_ | code |
16116561/cell_12 | [
"text_plain_output_1.png"
] | from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score,recall_score,f1_score,roc_auc_score,roc_curve
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
file = pd.read_csv('../input/pulsar_stars.csv')
y = file.target_class
X = file[file.columns[:8]]
X.shape
y.value_counts()
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X.astype(np.float64))
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=0)
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
rnd_clf = RandomForestClassifier(random_state=100)
param_grid = {'n_estimators': [100, 150], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [5, 6, 7, 8], 'criterion': ['gini', 'entropy']}
CV_rfc = GridSearchCV(estimator=rnd_clf, param_grid=param_grid, cv=5)
rnd_cv_fit = CV_rfc.fit(X_train_res, y_train_res)
rnd = RandomForestClassifier(random_state=100, n_estimators=150, criterion='gini', max_depth=8, max_features='log2')
rnd_fit = rnd_clf.fit(X_train_res, y_train_res)
y_test_fit = rnd_fit.predict(X_test)
print('Cross-Validated Accuracy on 3 cv sets:', cross_val_score(rnd, X_test, y_test, cv=3, scoring='accuracy'))
print('Precision Score:', precision_score(y_test, y_test_fit))
print('Recall Score:', recall_score(y_test, y_test_fit))
print('F1-score:', f1_score(y_test, y_test_fit)) | code |
17123947/cell_4 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql import SparkSession
my_spark = SparkSession.builder.getOrCreate()
print(my_spark) | code |
17123947/cell_23 | [
"text_plain_output_1.png"
] | from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql import SparkSession
my_spark = SparkSession.builder.getOrCreate()
file_path = '../input/flights.csv'
flights = my_spark.read.csv(file_path, header=True)
flights.createOrReplaceTempView('flights')
flights = flights.withColumn('duration_hrs', flights.air_time / 60)
flights.toPandas().shape[0]
file_path = '../input/planes.csv'
planes = my_spark.read.csv(file_path, header=True)
planes = planes.withColumnRenamed('year', 'plane_year')
model_data = flights.join(planes, on='tailnum', how='leftouter')
model_data = model_data.withColumn('arr_delay', model_data.arr_delay.cast('integer'))
model_data = model_data.withColumn('air_time', model_data.air_time.cast('integer'))
model_data = model_data.withColumn('month', model_data.month.cast('integer'))
model_data = model_data.withColumn('plane_year', model_data.plane_year.cast('integer'))
model_data = model_data.withColumn('plane_age', model_data.year - model_data.plane_year)
model_data = model_data.withColumn('is_late', model_data.arr_delay > 0)
model_data = model_data.withColumn('label', model_data.is_late.cast('integer'))
model_data = model_data.filter('arr_delay is not NULL and dep_delay is not NULL and air_time is not NULL and plane_year is not NULL')
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
carr_indexer = StringIndexer(inputCol='carrier', outputCol='carrier_index')
carr_encoder = OneHotEncoder(inputCol='carrier_index', outputCol='carrier_fact')
dest_indexer = StringIndexer(inputCol='dest', outputCol='dest_index')
dest_encoder = OneHotEncoder(inputCol='dest_index', outputCol='dest_fact')
vec_assembler = VectorAssembler(inputCols=['month', 'air_time', 'carrier_fact', 'dest_fact', 'plane_age'], outputCol='features')
from pyspark.ml import Pipeline
flights_pipe = Pipeline(stages=[dest_indexer, dest_encoder, carr_indexer, carr_encoder, vec_assembler])
piped_data = flights_pipe.fit(model_data).transform(model_data)
piped_data.toPandas().head(3) | code |
17123947/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pyspark.ml.classification import LogisticRegression
import numpy as np
import numpy as np # linear algebra
import pyspark.ml.evaluation as evals
import pyspark.ml.tuning as tune
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression()
import pyspark.ml.evaluation as evals
evaluator = evals.BinaryClassificationEvaluator(metricName='areaUnderROC')
import pyspark.ml.tuning as tune
grid = tune.ParamGridBuilder()
grid = grid.addGrid(lr.regParam, np.arange(0, 0.1, 0.01))
grid = grid.addGrid(lr.elasticNetParam, [0, 1])
grid = grid.build()
best_lr = lr.fit(training)
test_results = best_lr.transform(test)
print(evaluator.evaluate(test_results)) | code |
17123947/cell_6 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql import SparkSession
my_spark = SparkSession.builder.getOrCreate()
file_path = '../input/flights.csv'
flights = my_spark.read.csv(file_path, header=True)
flights.show()
print(my_spark.catalog.listTables())
flights.createOrReplaceTempView('flights')
print(my_spark.catalog.listTables()) | code |
17123947/cell_29 | [
"text_plain_output_1.png"
] | from pyspark.ml.classification import LogisticRegression
import numpy as np
import numpy as np # linear algebra
import pyspark.ml.tuning as tune
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression()
import pyspark.ml.tuning as tune
grid = tune.ParamGridBuilder()
grid = grid.addGrid(lr.regParam, np.arange(0, 0.1, 0.01))
grid = grid.addGrid(lr.elasticNetParam, [0, 1])
grid = grid.build()
best_lr = lr.fit(training)
print(best_lr) | code |
17123947/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
17123947/cell_8 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql import SparkSession
my_spark = SparkSession.builder.getOrCreate()
file_path = '../input/flights.csv'
flights = my_spark.read.csv(file_path, header=True)
flights.createOrReplaceTempView('flights')
flights = flights.withColumn('duration_hrs', flights.air_time / 60)
flights.toPandas().shape[0] | code |