Dataset columns:
path: string, lengths 13 to 17
screenshot_names: sequence, lengths 1 to 873
code: string, lengths 0 to 40.4k
cell_type: string, 1 distinct value ("code")
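Each record below pairs a notebook cell (identified by a "<kernel_id>/cell_<n>" path) with the names of its rendered outputs, its source code, and its cell type. A minimal sketch of iterating over rows with this schema, assuming the dump is stored as a Parquet file; the filename notebook_cells.parquet is hypothetical:

import pandas as pd

# Load the dump; the filename is an assumption, not part of the dataset.
cells = pd.read_parquet('notebook_cells.parquet')

# Inspect a few rows: cell path, rendered output names, cell type, and the
# first characters of the (often long) code field.
for _, row in cells.head(3).iterrows():
    print(row['path'], list(row['screenshot_names']), row['cell_type'])
    print(row['code'][:120])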
32068245/cell_16
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
linear_reg = LinearRegression()
x = data.deneyim.values.reshape(-1, 1)
y = data.maas.values.reshape(-1, 1)
linear_reg.fit(x, y)
import numpy as np
a = linear_reg.predict([[0]])
print('a: ', a)
a_ = linear_reg.intercept_
print('a_: ', a_)
code
32068245/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
linear_reg = LinearRegression()
x = data.deneyim.values.reshape(-1, 1)
y = data.maas.values.reshape(-1, 1)
linear_reg.fit(x, y)
code
32068245/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
linear_reg = LinearRegression()
x = data.deneyim.values.reshape(-1, 1)
y = data.maas.values.reshape(-1, 1)
linear_reg.fit(x, y)
a = linear_reg.predict([[0]])
a_ = linear_reg.intercept_
b = linear_reg.coef_
maas_yeni = 1663 + 1138 * 11
array = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]).reshape(-1, 1)
plt.scatter(x, y)
y_head = linear_reg.predict(array)
plt.plot(array, y_head, color='red')
plt.show()
code
32068245/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/sinan-dataset/multiple_linear_regression_dataset.csv', sep=';')
data
plt.scatter(data.deneyim, data.maas)
plt.xlabel('deneyim')
plt.ylabel('maas')
plt.show()
code
128037874/cell_21
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
class_categories = set(coasts['classes_array'])
class_categories
code
128037874/cell_13
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
data['image'].shape
code
128037874/cell_9
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
print(type(coasts.iloc[0]))
coasts.shape
code
128037874/cell_25
[ "text_plain_output_1.png" ]
import shutil
shutil.make_archive('coast_images', 'zip', 'coast_images')
shutil.make_archive('coast_labels', 'zip', 'coast_labels')
shutil.make_archive('coast_info', 'zip', 'coast_info')
code
128037874/cell_4
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.describe()
code
128037874/cell_23
[ "image_output_1.png" ]
!mkdir coast_images
!mkdir coast_labels
!mkdir coast_info
code
128037874/cell_6
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
code
128037874/cell_2
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
code
128037874/cell_11
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
for l in lst:
    print(l, type(data[l]))
code
128037874/cell_19
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
coasts['classes_array'].value_counts()
code
128037874/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import os
"\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n    for filename in filenames:\n        print(os.path.join(dirname, filename))\n"
code
128037874/cell_7
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
print(coasts.index)
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
print(len(class_index))
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
code
128037874/cell_18
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
np.argmax(data['label'], axis=2)
label = data['label']
new_label = np.argmax(label, axis=2)
print(new_label.shape)
print(new_label[0, 500])
print(data['0label'][0, 500])
print(new_label)
code
128037874/cell_8
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.describe()
code
128037874/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
np.argmax(data['label'], axis=2)
code
128037874/cell_16
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
print(data['classes'])
print(first_coast['classes_array'])
code
128037874/cell_3
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.head()
code
128037874/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
print(data['label'].shape)
code
128037874/cell_24
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
np.argmax(data['label'], axis=2)
label = data['label']
new_label = np.argmax(label, axis=2)

def get_mapping(classes):
    mapping = dict()
    if classes == "'water', 'herbaceous vegetation', 'woody vegetation', 'non-vegetated-dry', 'non-vegetated-wet', 'surf', 'developed'":
        mapping = {0: 0, 1: 4, 2: 4, 3: 3, 4: 3, 5: 1, 6: 5}
    elif classes == "'water', 'sand', 'gravel', 'cobble_boulder', 'vegetated', 'coastal_defense', 'buildings', 'pavement_road', 'vehicles', 'people', 'other_anthro', 'nodata'":
        mapping = {0: 0, 1: 2, 2: 2, 3: 3, 4: 4, 5: 5, 6: 5, 7: 5, 8: 5, 9: 5, 10: 5, 11: 6}
    elif classes == "'water', 'sand', 'gravel', 'cobble_boulder', 'vegetated', 'development', 'nodata', 'unusual'":
        mapping = {0: 0, 1: 2, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 6}
    elif classes == "'water', 'surf', 'bare_ground', 'vegetated_ground', 'development', 'no_data'":
        mapping = {0: 0, 1: 1, 2: 3, 3: 4, 4: 5, 5: 6}
    elif classes == "'water', 'surf', 'bare_ground', 'vegtated_ground', 'development', 'nodata'":
        mapping = {0: 0, 1: 1, 2: 3, 3: 4, 4: 5, 5: 6}
    elif classes == "'water', 'surf', 'sand', 'dev'":
        mapping = {0: 0, 1: 1, 2: 2, 3: 5}
    elif classes == "'water', 'whitewater', 'mud_silt', 'sand', 'gravel_shell', 'cobble_boulder', 'bedrock', 'ice_snow', 'vegetated', 'development', 'other'":
        mapping = {0: 0, 1: 4, 2: 3, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 4, 9: 5, 10: 6}
    elif classes == "'water', 'whitewater', 'sediment', 'other_bare_natural_terrain', 'marsh_vegetation', 'terrestrial_vegetation', 'agricultural', 'development', 'nodata', 'unusual', 'unknown'":
        mapping = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 4, 6: 4, 7: 5, 8: 6, 9: 6, 10: 6}
    elif classes == "'water', 'whitewater', 'sediment', 'other_natural_terrain', 'vegetated_surface', 'agricultural', 'development', 'cloud', 'nodata', 'unusual', 'unknown'":
        mapping = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 4, 6: 5, 7: 6, 8: 6, 9: 6, 10: 6}
    elif classes == "'water', 'whitewater', 'sediment', 'sediment_plume', 'other_natural_terrain', 'vegetated_surface', 'agricultural', 'development', 'cloud', 'nodata', 'unusual', 'unknown'":
        mapping = {0: 0, 1: 1, 2: 2, 3: 2, 4: 3, 5: 4, 6: 4, 7: 5, 8: 6, 9: 6, 10: 6, 11: 6}
    elif classes == "'water', 'whitewater', 'sediment', 'terrestrial_vegetation', 'marsh_vegetation', 'development', 'unusual', 'unknown', 'nodata'":
        mapping = {0: 0, 1: 1, 2: 2, 3: 4, 4: 4, 5: 5, 6: 6, 7: 6, 8: 6}
    return mapping

classes_list = ['water', 'whitewater', 'sediment', 'other_natural_terrain', 'vegetation', 'development', 'unknown']
count = 0
for i in tqdm(range(coasts.shape[0])):
    coast = coasts.iloc[i]
    classes_array = coast['classes_array']
    directory = coast['name'].split('.')[0]
    dir_names = coast['images'].split('/')[-1].split('.')[:-1]
    filename = ''
    for s in dir_names:
        filename += s + '.'
    filename = filename + 'npz'
    if os.path.exists(os.path.join(base_dir, directory, filename)):
        data = np.load(os.path.join(base_dir, directory, filename))
    elif os.path.exists(os.path.join(base_dir, directory, filename[:-10] + '.npz')):
        data = np.load(os.path.join(base_dir, directory, filename[:-10] + '.npz'))
    else:
        print(directory)
        print(filename)
        continue
    try:
        image = data['orig_image'][:, :, [0, 1, 2]]
    except:
        image = data['image'][:, :, [0, 1, 2]]
    if not (image.shape[0] == 1024 and image.shape[1] == 1024) and (not (image.shape[0] == 2048 and image.shape[1] == 2048)):
        continue
    count += 1
    label = data['label']
    if len(label.shape) != 3:
        print(directory)
        print(filename)
    label = np.argmax(label, axis=2)
    mapping = get_mapping(classes_array)
    normalized_label = label
    for x in range(label.shape[0]):
        for y in range(label.shape[1]):
            normalized_label[x, y] = mapping[normalized_label[x, y]]
    class_count = {c: 0 for c in classes_list}
    for class_index in range(len(classes_list)):
        class_count[classes_list[class_index]] = int(np.count_nonzero(normalized_label == class_index))
    assert int(sum(list(class_count.values()))) == label.shape[0] * label.shape[1]
    with open('coast_images/image_' + str(count) + '.npy', 'wb') as f:
        np.save(f, image)
    with open('coast_labels/image_' + str(count) + '_label.npy', 'wb') as fl:
        np.save(fl, normalized_label)
    with open('coast_info/image_' + str(count) + '_info.pkl', 'wb') as fi:
        pickle.dump(class_count, fi)
print(count)
code
128037874/cell_14
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
img = Image.fromarray(data['orig_image'][:, :, [0, 1, 2]])
plt.imshow(img, interpolation='nearest')
plt.show()
code
128037874/cell_10
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
print(first_coast['name'])
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
code
128037874/cell_12
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
coasts.iloc[0]
class_set = set()
class_index = []
coasts_df = coasts
for i in range(coasts.shape[0]):
    if coasts.loc[i, 'name'].startswith('Ortho'):
        class_index.append(i)
'\n    if (coasts.loc[i, "name"].startswith("Ortho") or coasts.loc[i, "name"].startswith("NAIP")) and not coasts.loc[i, "name"].startswith("Orthophoto_8_001"):# or coasts.loc[i, "name"].startswith("Quad"):\n        class_index.append(i)\n    '
coasts_df = coasts.iloc[class_index]
coasts = coasts_df
'\ncoasts.set_index(range(coasts.shape[0]))\nprint(coasts_df.shape)\ncoasts_df["classes_array"].value_counts()\n'
coasts.shape
first_coast = coasts.iloc[3]
directory = first_coast['name'].split('.')[0]
filename = first_coast['images'].split('/')[-1].split('.')[-2]
data = np.load(os.path.join(base_dir, directory, filename + '.npz'))
lst = data.files
list(data['classes'])
code
128037874/cell_5
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
base_dir = '/kaggle/input/coast-data'
coasts = pd.read_csv(os.path.join(base_dir, 'CoastTrain_imagery_details.csv'))
coasts.shape
coasts.name.value_counts()
code
106202263/cell_13
[ "text_plain_output_1.png" ]
another_list = [5, True, 'tree', 'tree']
print(another_list)
code
106202263/cell_15
[ "text_plain_output_1.png" ]
mylist = ['banana', 'cherry', 'apple']
print(mylist)
print(mylist[0])
print(mylist[1])
print(mylist[2])
print(mylist[-1])
print(mylist[-2])
code
106202263/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
mylist = ['banana', 'cherry', 'apple']
print(mylist[4])  # raises IndexError: the list only has indices 0-2
code
106202263/cell_10
[ "text_plain_output_1.png" ]
mylist = ['banana', 'cherry', 'apple']
print(mylist)
code
106202263/cell_12
[ "text_plain_output_1.png" ]
newlist = list()
print(newlist)
code
17115822/cell_13
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
x = data[feature_columns].values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
data[feature_columns] = pd.DataFrame(x_scaled)
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = test_data[feature_columns].hist(ax=ax)
code
17115822/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
code
17115822/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
if y_test is not None:
    y_pred_log_reg = log_reg.predict(X_test)
    confusion_matrix_log_reg = confusion_matrix(y_test, y_pred_log_reg)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_n_net = n_net.predict(X_test)
    confusion_matrix_n_net = confusion_matrix(y_test, y_pred_n_net)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_svm = svmC.predict(X_test)
    confusion_matrix_svm = confusion_matrix(y_test, y_pred_svm)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_k_NN = k_NN.predict(X_test)
    confusion_matrix_k_NN = confusion_matrix(y_test, y_pred_k_NN)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_rfc = rfc.predict(X_test)
    confusion_matrix_rfc = confusion_matrix(y_test, y_pred_rfc)
    sn.heatmap(confusion_matrix_rfc, annot=True, cmap='Blues', fmt='g')
    target_names = ['Survived', 'Not Survived']
    print(classification_report(y_test, y_pred_rfc, target_names=target_names))
code
17115822/cell_26
[ "image_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
if y_test is not None:
    y_pred_log_reg = log_reg.predict(X_test)
    confusion_matrix_log_reg = confusion_matrix(y_test, y_pred_log_reg)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_n_net = n_net.predict(X_test)
    confusion_matrix_n_net = confusion_matrix(y_test, y_pred_n_net)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_svm = svmC.predict(X_test)
    confusion_matrix_svm = confusion_matrix(y_test, y_pred_svm)
    sn.heatmap(confusion_matrix_svm, annot=True, cmap='Blues', fmt='g')
    target_names = ['Survived', 'Not Survived']
    print(classification_report(y_test, y_pred_svm, target_names=target_names))
code
17115822/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
data[feature_columns].head()
code
17115822/cell_19
[ "text_html_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
code
17115822/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
print(os.listdir('../input'))
code
17115822/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Cabin'].head()
code
17115822/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
if y_test is not None:
    y_pred_log_reg = log_reg.predict(X_test)
    confusion_matrix_log_reg = confusion_matrix(y_test, y_pred_log_reg)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_n_net = n_net.predict(X_test)
    confusion_matrix_n_net = confusion_matrix(y_test, y_pred_n_net)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_svm = svmC.predict(X_test)
    confusion_matrix_svm = confusion_matrix(y_test, y_pred_svm)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_k_NN = k_NN.predict(X_test)
    confusion_matrix_k_NN = confusion_matrix(y_test, y_pred_k_NN)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_rfc = rfc.predict(X_test)
    confusion_matrix_rfc = confusion_matrix(y_test, y_pred_rfc)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    print('Accuracy Logistic Regression:', accuracy_score(y_test, y_pred_log_reg))
    print('Accuracy Neural Net:', accuracy_score(y_test, y_pred_n_net))
    print('Accuracy Support Vector Machine:', accuracy_score(y_test, y_pred_svm))
    print('Accuracy kNN:', accuracy_score(y_test, y_pred_k_NN))
    print('Accuracy Random Forest:', accuracy_score(y_test, y_pred_rfc))
code
17115822/cell_28
[ "text_plain_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
if y_test is not None:
    y_pred_log_reg = log_reg.predict(X_test)
    confusion_matrix_log_reg = confusion_matrix(y_test, y_pred_log_reg)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_n_net = n_net.predict(X_test)
    confusion_matrix_n_net = confusion_matrix(y_test, y_pred_n_net)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_svm = svmC.predict(X_test)
    confusion_matrix_svm = confusion_matrix(y_test, y_pred_svm)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_k_NN = k_NN.predict(X_test)
    confusion_matrix_k_NN = confusion_matrix(y_test, y_pred_k_NN)
    sn.heatmap(confusion_matrix_k_NN, annot=True, cmap='Blues', fmt='g')
    target_names = ['Survived', 'Not Survived']
    print(classification_report(y_test, y_pred_k_NN, target_names=target_names))
code
17115822/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
data.head()
code
17115822/cell_15
[ "text_html_output_1.png" ]
from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
# Training data
x = data[feature_columns].values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
data[feature_columns] = pd.DataFrame(x_scaled)
# Submission data
# x = test_data[feature_columns].values
# min_max_scaler = preprocessing.MinMaxScaler()
# x_scaled = min_max_scaler.fit_transform(x)
# test_data[feature_columns] = pd.DataFrame(x_scaled)
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = test_data[feature_columns].hist(ax=ax)
f = plt.figure(figsize=(19, 15))
plt.matshow(data[feature_columns].corr(), fignum=f.number)
plt.xticks(range(data[feature_columns].shape[1]), data[feature_columns].columns, fontsize=14, rotation=45)
plt.yticks(range(data[feature_columns].shape[1]), data[feature_columns].columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16)
code
17115822/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
data.head()
test_data.head()
code
17115822/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
code
17115822/cell_24
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join((i for i in ticket_str if i.isdigit()))
    if ticket_numbers_only == '':
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
if y_test is not None:
    y_pred_log_reg = log_reg.predict(X_test)
    confusion_matrix_log_reg = confusion_matrix(y_test, y_pred_log_reg)
    target_names = ['Survived', 'Not Survived']
if y_test is not None:
    y_pred_n_net = n_net.predict(X_test)
    confusion_matrix_n_net = confusion_matrix(y_test, y_pred_n_net)
    sn.heatmap(confusion_matrix_n_net, annot=True, cmap='Blues', fmt='g')
    target_names = ['Survived', 'Not Survived']
    print(classification_report(y_test, y_pred_n_net, target_names=target_names))
    print('Accuracy:', accuracy_score(y_test, y_pred_n_net))
code
17115822/cell_22
[ "text_html_output_1.png" ]
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data['Sex'] = data['Sex'].map({'female': 1, 'male': 0})
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(data['Embarked'], prefix=None)
data = pd.concat([data, one_hot_columns], axis=1)
data.drop(['Embarked'], axis=1, inplace=True)
import re

def ticket_to_float(ticket_str):
    ticket_numbers_only = ''.join(i for i in ticket_str if i.isdigit())
    if ticket_numbers_only == '':  # was `is ''`, which tests identity, not equality
        return 0
    return int(ticket_numbers_only)

data['Ticket'] = data['Ticket'].apply(ticket_to_float)
test_data['Sex'] = test_data['Sex'].map({'female': 1, 'male': 0})
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data['Embarked'].fillna(method='ffill', inplace=True)
one_hot_columns = pd.get_dummies(test_data['Embarked'], prefix=None)
test_data = pd.concat([test_data, one_hot_columns], axis=1)
test_data.drop(['Embarked'], axis=1, inplace=True)
test_data['Ticket'] = test_data['Ticket'].apply(ticket_to_float)
feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'C', 'Q', 'S']
from sklearn.model_selection import train_test_split
X = data[feature_columns]
y = data['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.shape
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
log_reg = LogisticRegression(solver='sag', random_state=0)
log_reg.fit(X_train, y_train)
n_net = MLPClassifier(hidden_layer_sizes=(4, 4, 4), max_iter=500)
n_net.fit(X_train, y_train)
svmC = svm.SVC(kernel='linear')
svmC.fit(X_train, y_train)
k_NN = KNeighborsClassifier(n_neighbors=3)
k_NN.fit(X_train, y_train)
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
if y_test is not None:
    y_pred_log_reg = log_reg.predict(X_test)
    confusion_matrix_log_reg = confusion_matrix(y_test, y_pred_log_reg)
sn.heatmap(confusion_matrix_log_reg, annot=True, cmap='Blues', fmt='g')
target_names = ['Not Survived', 'Survived']  # class 0 = did not survive, class 1 = survived (order was reversed)
print(classification_report(y_test, y_pred_log_reg, target_names=target_names))
code
17115822/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
fig = plt.figure(figsize=(30, 20))
ax = fig.gca()
hist = data.hist(ax=ax)
data.describe()
code
88101152/cell_4
[ "text_plain_output_1.png" ]
import math

class Panorama:
    """Class that represents a picture returned by Google Street View Static API"""

    def __init__(self, width=640, height=640, fov=120, heading=0, pitch=0):
        self.width = width
        self.height = height
        self.fov = fov
        self.heading = heading
        self.pitch = pitch

    def unmap(self, heading, pitch):
        """Returns the pixel coordinates corresponding to the given heading and pitch offsets"""
        fov = self.fov * math.pi / 180.0
        width = self.width
        height = self.height
        f = 0.5 * width / math.tan(0.5 * fov)
        h = heading * math.pi / 180.0
        p = pitch * math.pi / 180.0
        x = f * math.cos(p) * math.sin(h)
        y = f * math.cos(p) * math.cos(h)
        z = f * math.sin(p)
        h0 = self.heading * math.pi / 180.0
        p0 = self.pitch * math.pi / 180.0
        x0 = f * math.cos(p0) * math.sin(h0)
        y0 = f * math.cos(p0) * math.cos(h0)
        z0 = f * math.sin(p0)
        t = f * f / (x0 * x + y0 * y + z0 * z)
        ux = math.copysign(math.cos(h0), math.cos(p0))
        uy = -math.copysign(math.sin(h0), math.cos(p0))
        uz = 0
        vx = -math.sin(p0) * math.sin(h0)
        vy = -math.sin(p0) * math.cos(h0)
        vz = math.cos(p0)
        x1 = t * x
        y1 = t * y
        z1 = t * z
        dx10 = x1 - x0
        dy10 = y1 - y0
        dz10 = z1 - z0
        du = ux * dx10 + uy * dy10 + uz * dz10
        dv = vx * dx10 + vy * dy10 + vz * dz10
        return (du + width / 2, height / 2 - dv)

    def map(self, u, v):
        """Returns a (heading, pitch) tuple corresponding to the given (x, y) offset in pixels"""
        fov = self.fov * math.pi / 180.0
        width = self.width
        height = self.height
        h0 = self.heading * math.pi / 180.0
        p0 = self.pitch * math.pi / 180.0
        f = 0.5 * width / math.tan(0.5 * fov)
        x0 = f * math.cos(p0) * math.sin(h0)
        y0 = f * math.cos(p0) * math.cos(h0)
        z0 = f * math.sin(p0)
        du = u - width / 2
        dv = height / 2 - v
        ux = math.copysign(math.cos(h0), math.cos(p0))
        uy = -math.copysign(math.sin(h0), math.cos(p0))
        uz = 0
        vx = -math.sin(p0) * math.sin(h0)
        vy = -math.sin(p0) * math.cos(h0)
        vz = math.cos(p0)
        x = x0 + du * ux + dv * vx
        y = y0 + du * uy + dv * vy
        z = z0 + du * uz + dv * vz
        R = math.sqrt(x * x + y * y + z * z)
        h = math.atan2(x, y)
        p = math.asin(z / R)
        return (h * 180.0 / math.pi, p * 180.0 / math.pi)

p = Panorama()
print(p.unmap(-17, 16))
print(p.map(264, 264))
print(p.unmap(-57, 40))
print(p.map(40, 40))
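# Editor's sanity check (not part of the original cell): map() is designed to
# invert unmap(), so a (heading, pitch) offset should survive the round trip
# up to floating-point error for offsets within the field of view.
h_rt, p_rt = p.map(*p.unmap(-17, 16))
print(abs(h_rt - -17) < 1e-6, abs(p_rt - 16) < 1e-6)  # expected: True True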
code
122251788/cell_13
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import numpy as np  # linear algebra
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
score = model.evaluate(test_generator)
predict = np.argmax(model.predict(test_generator), axis=1)
cnf_matrix = confusion_matrix(test_generator.labels, predict)
print(cnf_matrix)
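# Editor's note: shuffle=False on test_generator is what keeps
# test_generator.labels aligned with the row order of model.predict(),
# so the confusion matrix above is computed over matching pairs.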
code
122251788/cell_9
[ "image_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
code
122251788/cell_4
[ "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.preprocessing.image import ImageDataGenerator
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
code
122251788/cell_11
[ "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
score = model.evaluate(test_generator)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
code
122251788/cell_1
[ "text_plain_output_1.png" ]
from os import walk
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
import os
from os import walk
for dirpath, dirnames, filenames in walk('../input/yoga-poses-dataset/DATASET'):
    print('Directory path: ', dirpath)
code
122251788/cell_7
[ "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
code
122251788/cell_15
[ "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
plt.gca().set_ylim(0, 1)
score = model.evaluate(test_generator)
predict = np.argmax(model.predict(test_generator), axis=1)
cnf_matrix = confusion_matrix(test_generator.labels, predict)
ax = plt.subplot()
sns.heatmap(cnf_matrix, annot=True, fmt='g', ax=ax)
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Confusion Matrix')
ax.xaxis.set_ticklabels(['downdog', 'goddess', 'plank', 'tree', 'warrior2'])
ax.yaxis.set_ticklabels(['downdog', 'goddess', 'plank', 'tree', 'warrior2'])
code
122251788/cell_14
[ "image_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import numpy as np  # linear algebra
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
score = model.evaluate(test_generator)
predict = np.argmax(model.predict(test_generator), axis=1)
cnf_matrix = confusion_matrix(test_generator.labels, predict)
test_generator.class_indices
code
122251788/cell_10
[ "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
pd.DataFrame(history.history).plot(figsize=(21, 10))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
code
122251788/cell_12
[ "text_plain_output_1.png" ]
from PIL import ImageFile
from keras.applications import vgg16
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
import numpy as np  # linear algebra
train_path = '../input/yoga-poses-dataset/DATASET/TRAIN'
test_path = '../input/yoga-poses-dataset/DATASET/TEST'
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255.0, rotation_range=10, zoom_range=0.3, shear_range=0.3, fill_mode='nearest', validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 8
train_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='training', shuffle=True, seed=42)
validation_generator = train_datagen.flow_from_directory(directory=train_path, color_mode='rgb', target_size=(224, 224), batch_size=batch_size, class_mode='sparse', subset='validation', shuffle=True, seed=42)
test_generator = test_datagen.flow_from_directory(directory=test_path, target_size=(224, 224), color_mode='rgb', batch_size=batch_size, class_mode='sparse', shuffle=False, seed=42)
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
for layer in base_model.layers[:-5]:
    layer.trainable = False
from keras.models import Model
from keras.layers import Activation, Dropout, Flatten, Dense
last_output = base_model.output
x = Dense(train_generator.num_classes, activation='softmax', name='softmax')(last_output)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
from keras.optimizers import Adam
epochs = 10
learning_rate = 0.0001
opt = Adam(learning_rate=learning_rate, decay=learning_rate / (epochs * 0.5))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=validation_generator.n // validation_generator.batch_size, epochs=10)
score = model.evaluate(test_generator)
predict = np.argmax(model.predict(test_generator), axis=1)
code
122251788/cell_5
[ "text_plain_output_1.png" ]
from keras.applications import vgg16
from keras.applications import vgg16
base_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='max')
code
89126900/cell_4
[ "text_plain_output_1.png" ]
import datetime
import pandas as pd
import numpy as np
import pandas as pd
import re
import datetime
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
input_dir = '/kaggle/input/tabular-playground-series-mar-2022/'

def handle_dates(df):
    df['datetime'] = pd.to_datetime(df['time'])
    df['time'] = [datetime.datetime.time(d) for d in df.loc[:, 'datetime']]
    # the mapping is built from the global `train`, so train and test share the
    # same time -> time_number codes
    time_mapping = {t: ii for ii, t in enumerate(train.time.unique())}
    df['time_number'] = [time_mapping[d] for d in df.loc[:, 'time']]
    df['date'] = [datetime.datetime.date(d) for d in df.loc[:, 'datetime']]
    df['weekday'] = [d.weekday() for d in df.datetime]
    return df

train = pd.read_csv(input_dir + 'train.csv')
test = pd.read_csv(input_dir + 'test.csv')
train = handle_dates(train)
test = handle_dates(test)
print('Train shape: ' + str(train.shape) + ', Test shape:' + str(test.shape))
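# Editor's sketch of a vectorized alternative (same columns, using pandas'
# .dt accessor; not part of the original cell):
# df['time'] = df['datetime'].dt.time
# df['date'] = df['datetime'].dt.date
# df['weekday'] = df['datetime'].dt.weekday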
code
89126900/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import datetime
import pandas as pd
import numpy as np
import pandas as pd
import re
import datetime
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
input_dir = '/kaggle/input/tabular-playground-series-mar-2022/'

def handle_dates(df):
    df['datetime'] = pd.to_datetime(df['time'])
    df['time'] = [datetime.datetime.time(d) for d in df.loc[:, 'datetime']]
    time_mapping = {t: ii for ii, t in enumerate(train.time.unique())}
    df['time_number'] = [time_mapping[d] for d in df.loc[:, 'time']]
    df['date'] = [datetime.datetime.date(d) for d in df.loc[:, 'datetime']]
    df['weekday'] = [d.weekday() for d in df.datetime]
    return df

train = pd.read_csv(input_dir + 'train.csv')
test = pd.read_csv(input_dir + 'test.csv')
train = handle_dates(train)
test = handle_dates(test)
train.loc[:, 'loc_dir_time'] = [str(x) + str(y) + direction + str(t) for _, x, y, direction, t in train.loc[:, ['x', 'y', 'direction', 'time_number']].itertuples()]
test.loc[:, 'loc_dir_time'] = [str(x) + str(y) + direction + str(t) for _, x, y, direction, t in test.loc[:, ['x', 'y', 'direction', 'time_number']].itertuples()]
Xy = train.loc[:, ['loc_dir_time', 'weekday', 'congestion', 'date']]
Xy = pd.pivot_table(Xy, values='congestion', index=['date', 'weekday'], columns=['loc_dir_time']).reset_index()
FINAL = Xy.query('date==datetime.date(1991,9,30)')
Xy = Xy.query('date<datetime.date(1991,9,30)')
Xy = Xy.fillna(Xy.groupby('weekday').transform('median')).set_index('date')
code
50224683/cell_13
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames

def load_data(filenames):
    i = 50
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join('/kaggle/working/train', name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

X, y = load_data(filenames)  # editor's fix: this call was elided in the extracted cell, but X is used below

def refine_data(X):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    return X

X = refine_data(X)
X.shape
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters
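# Editor's note: plain np.random.randn weights can make activations explode or
# vanish as depth grows. A common He-style variant (a sketch, not the author's
# code) scales each layer by sqrt(2 / fan_in):
# parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * np.sqrt(2 / layer_dims[l - 1])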
code
50224683/cell_4
[ "text_plain_output_1.png" ]
import zipfile

def extract_files(source_path, target_path):
    zip_ref = zipfile.ZipFile(source_path, 'r')
    zip_ref.extractall(target_path)
    zip_ref.close()

extract_files('/kaggle/input/dogs-vs-cats/test1.zip', '/kaggle/working/')
extract_files('/kaggle/input/dogs-vs-cats/train.zip', '/kaggle/working/')
code
50224683/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
print(os.listdir('../input/dogs-vs-cats'))
code
50224683/cell_18
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames

def load_data(filenames):
    i = 50
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join('/kaggle/working/train', name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

X, y = load_data(filenames)  # editor's fix: this call was elided in the extracted cell, but X is used below

def refine_data(X):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    return X

X = refine_data(X)
X.shape
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters

def linear_fwd(A, W, b):
    Z = np.dot(W, A) + b
    cache = (A, W, b)
    return (Z, cache)

Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
Z.shape

def sigmoid(Z):
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return (A, Z)

def relu(Z):
    A = np.maximum(Z, 0)
    cache = Z
    return (A, Z)

sigmoid(np.array([0, 2]))
relu(np.array([-50, 50]))
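# Editor's note: 1 / (1 + np.exp(-Z)) overflows for large negative Z. A
# numerically stable drop-in (a sketch; scipy is assumed to be available):
# from scipy.special import expit
# def sigmoid(Z):
#     return (expit(Z), Z)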
code
50224683/cell_8
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames

def load_data(filenames):
    i = 50
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join('/kaggle/working/train', name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

sample = random.choice(filenames)
print(sample)
plt.imshow(mpimg.imread('/kaggle/working/train/' + sample))
plt.show()
code
50224683/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames

def load_data(filenames):
    i = 50
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join('/kaggle/working/train', name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

X, y = load_data(filenames)  # editor's fix: this call was elided in the extracted cell, but X is used below

def refine_data(X):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    return X

X = refine_data(X)
X.shape
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters

def linear_fwd(A, W, b):
    Z = np.dot(W, A) + b
    cache = (A, W, b)
    return (Z, cache)

Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
Z.shape
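# Editor's note: b has shape (n_l, 1), so it broadcasts across the m example
# columns of np.dot(W, A); Z therefore has shape (layer_dims[1], X.shape[1]).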
code
50224683/cell_17
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames

def load_data(filenames):
    i = 50
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join('/kaggle/working/train', name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

X, y = load_data(filenames)  # editor's fix: this call was elided in the extracted cell, but X is used below

def refine_data(X):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    return X

X = refine_data(X)
X.shape
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters

def linear_fwd(A, W, b):
    Z = np.dot(W, A) + b
    cache = (A, W, b)
    return (Z, cache)

Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
Z.shape

def sigmoid(Z):
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return (A, Z)

def relu(Z):
    A = np.maximum(Z, 0)
    cache = Z
    return (A, Z)

sigmoid(np.array([0, 2]))
code
50224683/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
filenames = os.listdir('/kaggle/working/train')
filenames

def load_data(filenames):
    i = 50
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join('/kaggle/working/train', name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

X, y = load_data(filenames)  # editor's fix: this call was elided in the extracted cell, but X is used below

def refine_data(X):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    return X

X = refine_data(X)
X.shape
code
50224683/cell_5
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
filenames = os.listdir('/kaggle/working/train')
filenames
code
2001025/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def get_xyz_data(filename):
    pos_data = []
    lat_data = []
    with open(filename) as f:
        for line in f.readlines():
            x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])  # dtype was np.float, removed in NumPy 1.24
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
    A = np.transpose(lat_data)
    B = np.linalg.inv(A)
    R = pos_data[0][0]
    return np.matmul(B, R)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
for c in train.columns:
    if c.find('angle') != -1:
        print(c)
        train[c] = np.radians(train[c])
        test[c] = np.radians(test[c])
traindata = np.zeros((train.shape[0], 3))
for i, idx in enumerate(train.id.values):
    fn = '../input/train/{}/geometry.xyz'.format(idx)
    data = get_xyz_data(fn)
    traindata[i, :] = data
testdata = np.zeros((test.shape[0], 3))
for i, idx in enumerate(test.id.values):
    fn = '../input/test/{}/geometry.xyz'.format(idx)
    data = get_xyz_data(fn)
    testdata[i, :] = data
train['a0'] = 0
train['a1'] = 0
train['a2'] = 0
train[['a0', 'a1', 'a2']] = traindata
test['a0'] = 0
test['a1'] = 0
test['a2'] = 0
test[['a0', 'a1', 'a2']] = testdata
train.number_of_total_atoms = np.log(train.number_of_total_atoms)
test.number_of_total_atoms = np.log(test.number_of_total_atoms)
alldata = pd.concat([train, test])
alldata = pd.concat([alldata.drop(['spacegroup'], axis=1), pd.get_dummies(alldata['spacegroup'], prefix='SG')], axis=1)
train = alldata[:train.shape[0]].copy()
test = alldata[train.shape[0]:].copy()
target_fe = np.log1p(train.formation_energy_ev_natom)
target_be = np.log1p(train.bandgap_energy_ev)
del train['formation_energy_ev_natom'], train['bandgap_energy_ev'], train['id'], test['id']
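# Editor's note: A holds the lattice vectors as columns, so B = A^-1 converts
# the first atom's Cartesian position R into fractional (lattice-basis)
# coordinates, i.e. R = A @ frac. Quick check on a hypothetical lattice:
# A_demo = np.diag([4.0, 5.0, 6.0]); R_demo = np.array([2.0, 2.5, 3.0])
# np.matmul(np.linalg.inv(A_demo), R_demo)  # -> array([0.5, 0.5, 0.5])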
code
18100300/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
code
18100300/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=label)
code
33099888/cell_13
[ "text_html_output_1.png" ]
import os
import pandas as pd
import requests
import unidecode
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
list(covid19co.columns.values)
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
covid19co.head()
code
33099888/cell_11
[ "text_html_output_1.png" ]
import os
import pandas as pd
import requests
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
covid19co.tail()
code
33099888/cell_15
[ "text_html_output_1.png" ]
import os
import pandas as pd
import requests
import unidecode
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
list(covid19co.columns.values)
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
# Update text columns to title case ("texto" in the original comment)
for attr in covid19co.columns:
    if covid19co[attr].dtypes == 'object':
        covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
# Show dataframe
covid19co.head()
if covid19co.isna().sum().sum() > 0:
    covid19co.fillna(value='-', inplace=True)
covid19co.head()
code
33099888/cell_16
[ "text_html_output_1.png" ]
import os
import pandas as pd
import requests
import unidecode
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
list(covid19co.columns.values)
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
# Update text columns to title case ("texto" in the original comment)
for attr in covid19co.columns:
    if covid19co[attr].dtypes == 'object':
        covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
# Show dataframe
covid19co.head()
if covid19co.isna().sum().sum() > 0:
    covid19co.fillna(value='-', inplace=True)

def setup_date(value):
    try:
        value = value.split('T')[0].split('-')
        if len(value) == 3:
            value = value[2] + '/' + value[1] + '/' + value[0]
        else:
            value = '-'
    except IndexError:
        value = '-'
    if len(value) != 10 and len(value) != 1:
        value = '-'
    return value

date_columns = list(filter(lambda value: value.find('FECHA') != -1 or value.find('FIS') != -1, covid19co.columns))
for date_column in date_columns:
    covid19co[date_column] = covid19co[date_column].transform(lambda value: setup_date(value))
covid19co.head()
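# Editor's worked example: setup_date('2020-04-12T00:00:00.000') splits on 'T',
# then on '-', and reorders to day/month/year -> '12/04/2020'; anything that
# does not yield exactly three parts (or 10 characters) falls through to '-'.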
code
33099888/cell_14
[ "text_html_output_1.png" ]
import os
import pandas as pd
import requests
import unidecode
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
list(covid19co.columns.values)
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
for attr in covid19co.columns:
    if covid19co[attr].dtypes == 'object':
        covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
covid19co.head()
code
33099888/cell_10
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import requests
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
code
33099888/cell_12
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import requests
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
URL_SAMPLES_PROCESSED = 'https://e.infogram.com/api/live/flex/638d656c-c77b-4326-97d3-e50cb410c6ab/8188140c-8352-4994-85e3-2100a4dbd9db?'
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
    with open(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
        dataset_file.write(official_dataset.content)
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_official.csv'))
covid19co.shape
list(covid19co.columns.values)
code
16115465/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_original = pd.read_csv('../input/train.csv')
structures_original = pd.read_csv('../input/structures.csv')
test_original = pd.read_csv('../input/test.csv')
tmp_merge = pd.DataFrame.merge(train_original, structures_original, how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index'])
tmp_merge = tmp_merge.merge(structures_original, how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index'])
tmp_merge.drop(columns=['atom_index_x', 'atom_index_y'], inplace=True)
tmp_merge.columns = ['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'scalar_coupling_constant', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1']
train = tmp_merge[['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1', 'scalar_coupling_constant']]
train.sort_values(by=['id', 'molecule_name'], inplace=True)
train.reset_index(inplace=True, drop=True)
tmp_merge = None
tmp_merge = pd.DataFrame.merge(test_original, structures_original, how='inner', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index'])
tmp_merge = tmp_merge.merge(structures_original, how='inner', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index'])
tmp_merge.drop(columns=['atom_index_x', 'atom_index_y'], inplace=True)
tmp_merge.columns = ['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1']
test = tmp_merge[['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1']]
test.sort_values(by=['id', 'molecule_name'], inplace=True)
test.reset_index(inplace=True, drop=True)
tmp_merge = None
del tmp_merge
moleculeCount = train.groupby(['molecule_name'])['atom_0'].nunique()
hydrogen = pd.DataFrame()
hydrogen['molecule_name'] = moleculeCount.index
hydrogen['hydrogenCnt'] = moleculeCount.values
moleculeCount = train[train['atom_nm_1'] == 'C'].groupby(['molecule_name'])['atom_1'].nunique()
carbon = pd.DataFrame()
carbon['molecule_name'] = moleculeCount.index
carbon['carbonCnt'] = moleculeCount.values
moleculeCount = train[train['atom_nm_1'] == 'N'].groupby(['molecule_name'])['atom_1'].nunique()
nitrogen = pd.DataFrame()
nitrogen['molecule_name'] = moleculeCount.index
nitrogen['nitrogenCnt'] = moleculeCount.values
moleculeCount = None
del moleculeCount
train = pd.DataFrame.merge(train, hydrogen, how='left', on='molecule_name')
train = train.merge(carbon, how='left', on='molecule_name')
train = train.merge(nitrogen, how='left', on='molecule_name')
train.carbonCnt.fillna(0, inplace=True)
train.nitrogenCnt.fillna(0, inplace=True)
train.head()
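# Editor's note: counting unique atom_0 values per molecule works as a hydrogen
# count because in this competition's coupling pairs atom_index_0 always refers
# to a hydrogen atom (every coupling type is of the form nJH*).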
code
16115465/cell_2
[ "text_html_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
16115465/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_original = pd.read_csv('../input/train.csv')
structures_original = pd.read_csv('../input/structures.csv')
test_original = pd.read_csv('../input/test.csv')
tmp_merge = pd.DataFrame.merge(train_original, structures_original, how='left', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index'])
tmp_merge = tmp_merge.merge(structures_original, how='left', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index'])
tmp_merge.drop(columns=['atom_index_x', 'atom_index_y'], inplace=True)
tmp_merge.columns = ['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'scalar_coupling_constant', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1']
train = tmp_merge[['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1', 'scalar_coupling_constant']]
train.sort_values(by=['id', 'molecule_name'], inplace=True)
train.reset_index(inplace=True, drop=True)
tmp_merge = None
tmp_merge = pd.DataFrame.merge(test_original, structures_original, how='inner', left_on=['molecule_name', 'atom_index_0'], right_on=['molecule_name', 'atom_index'])
tmp_merge = tmp_merge.merge(structures_original, how='inner', left_on=['molecule_name', 'atom_index_1'], right_on=['molecule_name', 'atom_index'])
tmp_merge.drop(columns=['atom_index_x', 'atom_index_y'], inplace=True)
tmp_merge.columns = ['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1']
test = tmp_merge[['id', 'molecule_name', 'atom_0', 'atom_1', 'type', 'atom_nm_0', 'x_0', 'y_0', 'z_0', 'atom_nm_1', 'x_1', 'y_1', 'z_1']]
test.sort_values(by=['id', 'molecule_name'], inplace=True)
test.reset_index(inplace=True, drop=True)
tmp_merge = None
del tmp_merge
moleculeCount = train.groupby(['molecule_name'])['atom_0'].nunique()
hydrogen = pd.DataFrame()
hydrogen['molecule_name'] = moleculeCount.index
hydrogen['hydrogenCnt'] = moleculeCount.values
moleculeCount = train[train['atom_nm_1'] == 'C'].groupby(['molecule_name'])['atom_1'].nunique()
carbon = pd.DataFrame()
carbon['molecule_name'] = moleculeCount.index
carbon['carbonCnt'] = moleculeCount.values
moleculeCount = train[train['atom_nm_1'] == 'N'].groupby(['molecule_name'])['atom_1'].nunique()
nitrogen = pd.DataFrame()
nitrogen['molecule_name'] = moleculeCount.index
nitrogen['nitrogenCnt'] = moleculeCount.values
moleculeCount = None
del moleculeCount
train = pd.DataFrame.merge(train, hydrogen, how='left', on='molecule_name')
train = train.merge(carbon, how='left', on='molecule_name')
train = train.merge(nitrogen, how='left', on='molecule_name')
train.carbonCnt.fillna(0, inplace=True)
train.nitrogenCnt.fillna(0, inplace=True)
moleculeCount = test.groupby(['molecule_name'])['atom_0'].nunique()
hydrogen = pd.DataFrame()
hydrogen['molecule_name'] = moleculeCount.index
hydrogen['hydrogenCnt'] = moleculeCount.values
moleculeCount = test[test['atom_nm_1'] == 'C'].groupby(['molecule_name'])['atom_1'].nunique()
carbon = pd.DataFrame()
carbon['molecule_name'] = moleculeCount.index
carbon['carbonCnt'] = moleculeCount.values
moleculeCount = test[test['atom_nm_1'] == 'N'].groupby(['molecule_name'])['atom_1'].nunique()
nitrogen = pd.DataFrame()
nitrogen['molecule_name'] = moleculeCount.index
nitrogen['nitrogenCnt'] = moleculeCount.values
moleculeCount = None
del moleculeCount
test = pd.DataFrame.merge(test, hydrogen, how='left', on='molecule_name')
test = test.merge(carbon, how='left', on='molecule_name')
test = test.merge(nitrogen, how='left', on='molecule_name')
test.carbonCnt.fillna(0, inplace=True)
test.nitrogenCnt.fillna(0, inplace=True)
test.head()
code
32062535/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv', parse_dates=['Date'])
countryinfo = pd.read_csv('../input/countryinfo/covid19countryinfo.csv', parse_dates=['quarantine', 'schools', 'publicplace', 'nonessential', 'gathering'])
states = pd.read_csv('../input/covid19-in-usa/us_states_covid19_daily.csv')
state_pops = pd.read_csv('../input/us-state-populations-2018/State Populations.csv')
dataset = train[train['Date'] == '2020-04-12'].sort_values(by=['ConfirmedCases'], ascending=False).head(100)
merged = dataset.merge(countryinfo, left_on=['Country_Region', 'Province_State'], right_on=['country', 'region'], how='left').merge(state_pops, left_on=['Province_State'], right_on=['State'], how='left')[['Id', 'Country_Region', 'Province_State', 'Date', 'ConfirmedCases', 'Fatalities', 'pop', 'density', 'quarantine', 'schools', 'publicplace', 'nonessential', 'gathering', '2018 Population']]
merged.head(20)
merged.dtypes
code
32062535/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv', parse_dates=['Date'])
countryinfo = pd.read_csv('../input/countryinfo/covid19countryinfo.csv', parse_dates=['quarantine', 'schools', 'publicplace', 'nonessential', 'gathering'])
states = pd.read_csv('../input/covid19-in-usa/us_states_covid19_daily.csv')
state_pops = pd.read_csv('../input/us-state-populations-2018/State Populations.csv')
state_pops.head()
code
50240297/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex1 = main_df[['Survived', 'Sex']]
main_df_sex1 = main_df_sex1.value_counts().to_frame()
main_df_sex1.reset_index(drop=False, inplace=True)
main_df_sex1.rename(columns={0: 'Counts'}, inplace=True)
main_df_sex1['Survived'] = main_df_sex1['Survived'].replace([0, 1], ['Not-Survived', 'Survived'])
main_df_sex1.set_index(['Survived', 'Sex'], drop=True, inplace=True)
main_df_sex1
main_df_sex1.reset_index(drop=False, inplace=True)
for i in main_df_sex1.index:
    if main_df_sex1.iloc[i]['Sex'] == 'male':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 577 * 100
    if main_df_sex1.iloc[i]['Sex'] == 'female':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 312 * 100
main_df_sex1
male_stats = main_df_sex1[main_df_sex1['Sex'] == 'male']
male_stats.drop(['Counts'], axis=1, inplace=True)
male_stats.set_index(['Survived', 'Sex'], drop=True, inplace=True)
female_stats = main_df_sex1[main_df_sex1['Sex'] == 'female']
female_stats.drop(['Counts'], axis=1, inplace=True)
female_stats.set_index(['Survived', 'Sex'], drop=True, inplace=True)
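# Editor's note: the hard-coded denominators are the male/female totals after
# the two rows with missing Embarked (both female) are dropped: 577 and 312.
# A less brittle sketch would derive them instead, e.g.
# sex_counts = main_df['Sex'].value_counts()
# main_df_sex1['%'] = main_df_sex1['Counts'] / main_df_sex1['Sex'].map(sex_counts) * 100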
code
50240297/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex = main_df['Sex'].value_counts()
main_df_sex
code
50240297/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df['Cabin'].unique()
code
50240297/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
main_df.info()
code
50240297/cell_23
[ "text_html_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
final_sx_df.plot(kind='bar', rot=90, color='#FF0000')
plt.xlabel('Survival % based on sex')
plt.ylabel('% of Survived')
plt.title('Impact of sex of a passenger on their survival rate on Titanic')
plt.show()
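# final_sx_df is not defined in this cell: it is the survival-percentage
# frame built in the 50240297/cell_20 record of this notebook, so this cell
# only renders that result as a bar chart.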
code
50240297/cell_30
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
working_df = main_df
working_df.drop(['Name', 'Cabin', 'Ticket'], inplace=True, axis=1)
working_class_df = working_df[['Pclass', 'Survived']]
working_class_df
working_class_df_plot = working_class_df.groupby(['Pclass', 'Survived'])['Pclass'].count().to_frame()
working_class_df_plot.rename(columns={'Pclass': 'Count'}, inplace=True)
working_class_df_plot
working_class_df_plot.reset_index(inplace=True)
working_class_df_plot
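# groupby(['Pclass', 'Survived'])['Pclass'].count() yields one row per
# (class, outcome) pair; to_frame()/rename turn it into a 'Count' column, and
# reset_index moves the pair back into ordinary columns, giving a long-format
# table convenient for plotting. A roughly equivalent one-liner (up to row
# order, and assuming pandas >= 1.1 for DataFrame.value_counts) would be:
# working_class_df.value_counts().rename('Count').reset_index()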
code
50240297/cell_20
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex1 = main_df[['Survived', 'Sex']]
main_df_sex1 = main_df_sex1.value_counts().to_frame()
main_df_sex1.reset_index(drop=False, inplace=True)
main_df_sex1.rename(columns={0: 'Counts'}, inplace=True)
main_df_sex1['Survived'] = main_df_sex1['Survived'].replace([0, 1], ['Not-Survived', 'Survived'])
main_df_sex1.set_index(['Survived', 'Sex'], drop=True, inplace=True)
main_df_sex1
main_df_sex1.reset_index(drop=False, inplace=True)
for i in main_df_sex1.index:
    if main_df_sex1.iloc[i]['Sex'] == 'male':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 577 * 100
    if main_df_sex1.iloc[i]['Sex'] == 'female':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 312 * 100
main_df_sex1
final_sx_df = main_df_sex1[['Survived', 'Sex', '%']].sort_values(by=['Survived', 'Sex'])
final_sx_df.set_index(['Survived', 'Sex'], inplace=True, drop=True)
final_sx_df
code
50240297/cell_26
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
working_df = main_df
working_df.drop(['Name', 'Cabin', 'Ticket'], inplace=True, axis=1)
working_df
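# Note that working_df = main_df binds a second name to the same DataFrame,
# so the drop(..., inplace=True) below it also removes Name/Cabin/Ticket from
# main_df. Use working_df = main_df.copy() if the original should be kept.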
code
50240297/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
print('Files Imported!')
code
50240297/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df['Embarked'].unique()
code
50240297/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
main_df.drop(main_df[main_df['Embarked'].isnull()].index, inplace=True, axis=0)
main_df_sex1 = main_df[['Survived', 'Sex']]
main_df_sex1 = main_df_sex1.value_counts().to_frame()
main_df_sex1.reset_index(drop=False, inplace=True)
main_df_sex1.rename(columns={0: 'Counts'}, inplace=True)
main_df_sex1['Survived'] = main_df_sex1['Survived'].replace([0, 1], ['Not-Survived', 'Survived'])
main_df_sex1.set_index(['Survived', 'Sex'], drop=True, inplace=True)
main_df_sex1
main_df_sex1.reset_index(drop=False, inplace=True)
for i in main_df_sex1.index:
    print(str(i))
    if main_df_sex1.iloc[i]['Sex'] == 'male':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 577 * 100
    if main_df_sex1.iloc[i]['Sex'] == 'female':
        main_df_sex1.loc[i, '%'] = main_df_sex1.iloc[i]['Counts'] / 312 * 100
main_df_sex1
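# The loop mixes positional iloc with label-based loc; that is only safe here
# because reset_index left a default RangeIndex (0..n-1) where positions and
# labels coincide. On any other index, iloc[i] and loc[i] could address
# different rows.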
code
50240297/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50240297/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file_path = '../input/titanic/train.csv'
gs_file_path = '../input/titanic/gender_submission.csv'
main_df = pd.read_csv(train_file_path)
gender_sub_df = pd.read_csv(gs_file_path)
cols1 = main_df.columns.to_list()
cols2 = gender_sub_df.columns.to_list()
print('************** Train Dataset *************************')
for i in cols1:
    print('Column :' + i + ' count : ' + str(main_df[i].isnull().sum()))
print('************** Gender submission Dataset *************************')
for i in cols2:
    print('Column :' + i + ' count : ' + str(gender_sub_df[i].isnull().sum()))
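# This prints a per-column missing-value audit for both files. The same
# information is available vectorised, e.g. main_df.isnull().sum(), without
# the explicit loops.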
code