path
stringlengths
13
17
screenshot_names
sequencelengths
1
873
code
stringlengths
0
40.4k
cell_type
stringclasses
1 value
17139134/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/IMDB-Movie-Data.csv') data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True) for col in data.columns: nans = pd.value_counts(data[col].isnull()) Nans = data[pd.isnull(data).any(axis=1)] print(Nans.head())
code
17139134/cell_5
[ "image_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/IMDB-Movie-Data.csv') print(data.columns) print(50 * '-') print(data.dtypes) print(50 * '-') print(data.shape)
code
90108657/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} def set_seed(SEED): os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) tf.random.set_seed(SEED) set_seed(config['SEED']) def get_device(device): if device == 'TPU': try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: if device == 'GPU': strategy = tf.distribute.MirroredStrategy() return strategy strategy = get_device(config['device']) config['batch_size'] = config['batch_size'] * strategy.num_replicas_in_sync train_ds = tf.data.Dataset.from_tensor_slices((X_train.spec_path.values, X_train.genre_id.values)) valid_ds = tf.data.Dataset.from_tensor_slices((X_valid.spec_path.values, X_valid.genre_id.values)) AUTOTUNE = tf.data.experimental.AUTOTUNE def process_data_train(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.random_brightness(img, 0.1) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) def process_data_valid(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) train_ds = train_ds.map(process_data_train, num_parallel_calls=AUTOTUNE) valid_ds = valid_ds.map(process_data_valid, num_parallel_calls=AUTOTUNE) def configure_for_performance(ds, batch_size = 32): ds = ds.cache('/kaggle/dump.tfcache') ds = ds.shuffle(buffer_size=32) ds = ds.batch(batch_size) ds = ds.prefetch(buffer_size=AUTOTUNE) return ds train_ds_batch = configure_for_performance(train_ds, config['batch_size']) valid_ds_batch = valid_ds.batch(config['batch_size']*2) image_batch, label_batch = next(iter(train_ds_batch)) plt.figure(figsize=(10, 
10)) for i in range(8): ax = plt.subplot(4, 4, i + 1) plt.imshow(image_batch[i].numpy().astype('uint8')) label = label_batch[i].numpy() plt.title(label) plt.axis('off')
code
90108657/cell_9
[ "text_plain_output_100.png", "text_plain_output_84.png", "text_plain_output_56.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_35.png", "text_plain_output_130.png", "text_plain_output_117.png", "text_plain_output_98.png", "text_plain_output_43.png", "text_plain_output_78.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_5.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_115.png", "text_plain_output_15.png", "text_plain_output_133.png", "text_plain_output_114.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_119.png", "text_plain_output_86.png", "text_plain_output_118.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_102.png", "text_plain_output_111.png", "text_plain_output_101.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "text_plain_output_13.png", "text_plain_output_107.png", "text_plain_output_52.png", "text_plain_output_66.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_88.png", "text_plain_output_29.png", "text_plain_output_140.png", "text_plain_output_129.png", "text_plain_output_58.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_27.png", "text_plain_output_76.png", "text_plain_output_108.png", "text_plain_output_54.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_92.png", 
"text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_104.png", "text_plain_output_47.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_77.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_96.png", "text_plain_output_87.png", "text_plain_output_141.png", "text_plain_output_112.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_113.png", "text_plain_output_22.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_125.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_91.png", "text_plain_output_16.png", "text_plain_output_59.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_26.png", "text_plain_output_109.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_85.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_67.png", "text_plain_output_53.png", "text_plain_output_23.png", "text_plain_output_89.png", "text_plain_output_51.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_136.png", "text_plain_output_2.png", "text_plain_output_127.png", "text_plain_output_97.png", "text_plain_output_33.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_39.png", "text_plain_output_55.png", "text_plain_output_82.png", "text_plain_output_93.png", "text_plain_output_19.png", "text_plain_output_105.png", "text_plain_output_80.png", "text_plain_output_94.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_62.png", "text_plain_output_95.png", "text_plain_output_61.png", "text_plain_output_83.png", 
"text_plain_output_135.png", "text_plain_output_46.png" ]
import numpy as np import os import tensorflow as tf config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} def set_seed(SEED): os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) tf.random.set_seed(SEED) set_seed(config['SEED']) def get_device(device): if device == 'TPU': try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: if device == 'GPU': strategy = tf.distribute.MirroredStrategy() return strategy strategy = get_device(config['device']) config['batch_size'] = config['batch_size'] * strategy.num_replicas_in_sync train_ds = tf.data.Dataset.from_tensor_slices((X_train.spec_path.values, X_train.genre_id.values)) valid_ds = tf.data.Dataset.from_tensor_slices((X_valid.spec_path.values, X_valid.genre_id.values)) for path, label in train_ds.take(5): print('Path: {}, Label: {}'.format(path, label))
code
90108657/cell_4
[ "text_plain_output_1.png" ]
import numpy as np import os import tensorflow as tf config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} def set_seed(SEED): os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) tf.random.set_seed(SEED) set_seed(config['SEED']) def get_device(device): if device == 'TPU': try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: print('Cannot initialize TPU') if device == 'GPU': strategy = tf.distribute.MirroredStrategy() print('Number of accelerators: ', strategy.num_replicas_in_sync) return strategy strategy = get_device(config['device']) config['batch_size'] = config['batch_size'] * strategy.num_replicas_in_sync
code
90108657/cell_6
[ "text_plain_output_1.png" ]
import glob import pandas as pd config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} df_train = pd.read_csv('../input/kaggle-pog-series-s01e02/train.csv') df_test = pd.read_csv('../input/kaggle-pog-series-s01e02/test.csv') df_train['ID'] = df_train['filename'].str.split('.').str[0] df_test['ID'] = df_test['filename'].str.split('.').str[0] train_files = pd.DataFrame(glob.glob('../input/pog-train-and-test-spectograms/kaggle/spectograms/train/*.jpg'), columns=['spec_path']) test_files = pd.DataFrame(glob.glob('../input/pog-train-and-test-spectograms/kaggle/spectograms/test/*.jpg'), columns=['spec_path']) train_files['ID'] = train_files['spec_path'].str.split('/').str[-1].str.split('.').str[0] test_files['ID'] = test_files['spec_path'].str.split('/').str[-1].str.split('.').str[0] df_train_spec = pd.merge(df_train, train_files, how='right', on='ID') df_test_spec = pd.merge(df_test, test_files, how='right', on='ID') config['num_labels'] = df_train_spec['genre_id'].nunique() df_train_spec.genre_id.value_counts(normalize=True) * 100
code
90108657/cell_11
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} def set_seed(SEED): os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) tf.random.set_seed(SEED) set_seed(config['SEED']) def get_device(device): if device == 'TPU': try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: if device == 'GPU': strategy = tf.distribute.MirroredStrategy() return strategy strategy = get_device(config['device']) config['batch_size'] = config['batch_size'] * strategy.num_replicas_in_sync train_ds = tf.data.Dataset.from_tensor_slices((X_train.spec_path.values, X_train.genre_id.values)) valid_ds = tf.data.Dataset.from_tensor_slices((X_valid.spec_path.values, X_valid.genre_id.values)) AUTOTUNE = tf.data.experimental.AUTOTUNE def process_data_train(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.random_brightness(img, 0.1) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) def process_data_valid(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) train_ds = train_ds.map(process_data_train, num_parallel_calls=AUTOTUNE) valid_ds = valid_ds.map(process_data_valid, num_parallel_calls=AUTOTUNE) for image, label in train_ds.take(1): plt.imshow(image.numpy().astype('uint8')) plt.show() print('Image shape: ', image.numpy().shape) print('Label: ', label.numpy())
code
90108657/cell_18
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} def set_seed(SEED): os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) tf.random.set_seed(SEED) set_seed(config['SEED']) def get_device(device): if device == 'TPU': try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: if device == 'GPU': strategy = tf.distribute.MirroredStrategy() return strategy strategy = get_device(config['device']) config['batch_size'] = config['batch_size'] * strategy.num_replicas_in_sync train_ds = tf.data.Dataset.from_tensor_slices((X_train.spec_path.values, X_train.genre_id.values)) valid_ds = tf.data.Dataset.from_tensor_slices((X_valid.spec_path.values, X_valid.genre_id.values)) AUTOTUNE = tf.data.experimental.AUTOTUNE def process_data_train(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.random_brightness(img, 0.1) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) def process_data_valid(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) train_ds = train_ds.map(process_data_train, num_parallel_calls=AUTOTUNE) valid_ds = valid_ds.map(process_data_valid, num_parallel_calls=AUTOTUNE) def configure_for_performance(ds, batch_size = 32): ds = ds.cache('/kaggle/dump.tfcache') ds = ds.shuffle(buffer_size=32) ds = ds.batch(batch_size) ds = ds.prefetch(buffer_size=AUTOTUNE) return ds train_ds_batch = configure_for_performance(train_ds, config['batch_size']) valid_ds_batch = valid_ds.batch(config['batch_size']*2) with strategy.scope(): model = 
tf.keras.models.Sequential([tf.keras.layers.Conv2D(16, kernel_size=5, activation='relu', input_shape=(config['img_size'], config['img_size'], 3)), tf.keras.layers.Conv2D(32, kernel_size=3, activation='relu'), tf.keras.layers.Conv2D(64, kernel_size=3, strides=2, activation='relu'), tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(config['num_labels'], activation='softmax')]) model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer='adam', metrics='accuracy') model.summary() best_weight_path = 'best_model.hdf5' last_weight_path = 'last_model.hdf5' checkpoint = tf.keras.callbacks.ModelCheckpoint(best_weight_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=False) checkpoint_last = tf.keras.callbacks.ModelCheckpoint(last_weight_path, monitor='val_loss', verbose=1, save_best_only=False, mode='min', save_weights_only=False) early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=5) reduceLROnPlat = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=2, verbose=1, mode='auto', epsilon=0.0001, cooldown=5, min_lr=1e-05) callbacks_list = [checkpoint, checkpoint_last, early, reduceLROnPlat] history = model.fit(train_ds_batch, callbacks=callbacks_list, epochs=config['epochs'], validation_data=valid_ds_batch)
code
90108657/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf config = {'SEED': 42, 'DEBUG': False, 'test_size': 0.1, 'img_size': 128, 'batch_size': 32, 'num_labels': 0, 'epochs': 10, 'device': 'GPU'} debug_config = {} def set_seed(SEED): os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) tf.random.set_seed(SEED) set_seed(config['SEED']) def get_device(device): if device == 'TPU': try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() strategy = tf.distribute.TPUStrategy(tpu) except ValueError: if device == 'GPU': strategy = tf.distribute.MirroredStrategy() return strategy strategy = get_device(config['device']) config['batch_size'] = config['batch_size'] * strategy.num_replicas_in_sync train_ds = tf.data.Dataset.from_tensor_slices((X_train.spec_path.values, X_train.genre_id.values)) valid_ds = tf.data.Dataset.from_tensor_slices((X_valid.spec_path.values, X_valid.genre_id.values)) AUTOTUNE = tf.data.experimental.AUTOTUNE def process_data_train(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.random_brightness(img, 0.1) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) def process_data_valid(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, [config['img_size'], config['img_size']]) return (img, label) train_ds = train_ds.map(process_data_train, num_parallel_calls=AUTOTUNE) valid_ds = valid_ds.map(process_data_valid, num_parallel_calls=AUTOTUNE) with strategy.scope(): model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(16, kernel_size=5, activation='relu', input_shape=(config['img_size'], config['img_size'], 3)), tf.keras.layers.Conv2D(32, kernel_size=3, activation='relu'), tf.keras.layers.Conv2D(64, kernel_size=3, strides=2, activation='relu'), tf.keras.layers.Conv2D(128, kernel_size=3, activation='relu'), 
tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(config['num_labels'], activation='softmax')]) model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer='adam', metrics='accuracy') model.summary()
code
90108657/cell_17
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
!nvidia-smi
code
74057413/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) AMR_data = pd.read_csv('../input/antibiotic-resistance-genes/Efaecium_AMRC.csv') AMR_data.dtypes print(AMR_data.shape) print('*****************') print(AMR_data.nunique()) print('*****************') print(AMR_data[AMR_data['CRISPR_Cas'] == 1]['AMR'].count()) print(AMR_data[AMR_data['CRISPR_Cas'] == 0]['AMR'].count())
code
74057413/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) AMR_data = pd.read_csv('../input/antibiotic-resistance-genes/Efaecium_AMRC.csv') AMR_data.dtypes
code
74057413/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns AMR_data = pd.read_csv('../input/antibiotic-resistance-genes/Efaecium_AMRC.csv') AMR_data.dtypes sns.displot(AMR_data, x='CRISPR_Cas')
code
74057413/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
74057413/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) AMR_data = pd.read_csv('../input/antibiotic-resistance-genes/Efaecium_AMRC.csv') AMR_data.dtypes AMR_data.describe(include='all')
code
74057413/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns AMR_data = pd.read_csv('../input/antibiotic-resistance-genes/Efaecium_AMRC.csv') AMR_data.dtypes sns.set(style='darkgrid') sns.jointplot(y=AMR_data['CRISPR_Cas'], x=AMR_data['AMR'])
code
74057413/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) AMR_data = pd.read_csv('../input/antibiotic-resistance-genes/Efaecium_AMRC.csv') AMR_data.head(15)
code
16153001/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
summarizeColumns(transactions)
code
16153001/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
library(data.table) library(mlr)
code
16153001/cell_3
[ "text_html_output_1.png" ]
head(image)
code
129012879/cell_4
[ "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
!pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.11/index.html
code
129012879/cell_23
[ "image_output_1.png" ]
from PIL import Image from mmcv import Config from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot from mmseg.apis import set_random_seed from mmseg.apis import train_segmentor from mmseg.datasets import build_dataset from mmseg.datasets.builder import DATASETS from mmseg.datasets.custom import CustomDataset from mmseg.models import build_segmentor import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np import os.path as osp import mmcv import matplotlib.pyplot as plt img = mmcv.imread('nails/images/2C29D473-CCB4-458C-926B-99D0042161E6.jpg') import os.path as osp import numpy as np from PIL import Image data_root = 'nails' img_dir = 'images' ann_dir = 'labels' classes = ('background', 'nails') palette = [[0, 0, 0], [225, 25, 38]] for file in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.jpg'): seg_img = Image.open(osp.join(data_root, ann_dir, file)).convert('P') seg_img.putpalette(np.array(palette, dtype=np.uint8)) seg_img.save(osp.join(data_root, ann_dir, file.replace('.jpg', '.png'))) # Let's take a look at the segmentation map we got import matplotlib.patches as mpatches img = Image.open('/content/mmsegmentation/nails/labels/09aefeec-e05f-11e8-87a6-0242ac1c0002.png') plt.figure(figsize=(8, 6)) im = plt.imshow(np.array(img.convert('RGB'))) # create a patch (proxy artist) for every color patches = [mpatches.Patch(color=np.array(palette[i])/255., label=classes[i]) for i in range(2)] # put those patched as legend-handles into the legend plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize='large') plt.show() split_dir = 'splits' mmcv.mkdir_or_exist(osp.join(data_root, split_dir)) filename_list = [osp.splitext(filename)[0] for filename in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.png')] with open(osp.join(data_root, split_dir, 'train.txt'), 'w') as f: train_length = int(len(filename_list) * 0.85) f.writelines((line + '\n' for line in 
filename_list[:train_length])) with open(osp.join(data_root, split_dir, 'val.txt'), 'w') as f: f.writelines((line + '\n' for line in filename_list[train_length:])) from mmseg.datasets.builder import DATASETS from mmseg.datasets.custom import CustomDataset @DATASETS.register_module() class NailsDataset(CustomDataset): CLASSES = classes PALETTE = palette def __init__(self, split, **kwargs): super().__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) assert osp.exists(self.img_dir) and self.split is not None from mmcv import Config cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py ') from mmseg.apis import set_random_seed cfg.norm_cfg = dict(type='BN', requires_grad=True) cfg.model.backbone.norm_cfg = cfg.norm_cfg cfg.model.decode_head.norm_cfg = cfg.norm_cfg cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg cfg.model.decode_head.num_classes = 2 cfg.model.auxiliary_head.num_classes = 2 cfg.model.decode_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.model.auxiliary_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.dataset_type = 'NailsDataset' cfg.data_root = data_root cfg.data.samples_per_gpu = 8 cfg.data.workers_per_gpu = 8 cfg.img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) cfg.crop_size = (256, 256) cfg.train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(320, 240), ratio_range=(0.5, 2.0)), dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255), dict(type='DefaultFormatBundle'), dict(type='Collect', 
keys=['img', 'gt_semantic_seg'])] cfg.test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(320, 240), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])] cfg.data.train.type = cfg.dataset_type cfg.data.train.data_root = cfg.data_root cfg.data.train.img_dir = img_dir cfg.data.train.ann_dir = ann_dir cfg.data.train.pipeline = cfg.train_pipeline cfg.data.train.split = 'splits/train.txt' cfg.data.val.type = cfg.dataset_type cfg.data.val.data_root = cfg.data_root cfg.data.val.img_dir = img_dir cfg.data.val.ann_dir = ann_dir cfg.data.val.pipeline = cfg.test_pipeline cfg.data.val.split = 'splits/val.txt' cfg.data.test.type = cfg.dataset_type cfg.data.test.data_root = cfg.data_root cfg.data.test.img_dir = img_dir cfg.data.test.ann_dir = ann_dir cfg.data.test.pipeline = cfg.test_pipeline cfg.data.test.split = 'splits/val.txt' cfg.load_from = 'checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200605_094614-3769eecf.pth' cfg.work_dir = './work_dirs/nails' cfg.optimizer.lr = 0.01 / 8 cfg.runner.max_iters = 600 cfg.log_config.interval = 10 cfg.evaluation.interval = 200 cfg.checkpoint_config.interval = 200 cfg.seed = 1111 set_random_seed(1111, deterministic=False) cfg.gpu_ids = range(1) from mmseg.datasets import build_dataset from mmseg.models import build_segmentor from mmseg.apis import train_segmentor datasets = [build_dataset(cfg.data.train)] model = build_segmentor(cfg.model) model.CLASSES = datasets[0].CLASSES mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) train_segmentor(model, datasets, cfg, distributed=False, validate=True, meta=dict()) img = mmcv.imread('/content/mmsegmentation/nails/images/09aefeec-e05f-11e8-87a6-0242ac1c0002.jpg') model.cfg = cfg result = inference_segmentor(model, img) img = 
mmcv.imread('/content/mmsegmentation/nails/nails_segmentation/images/d62b0cd8-db67-11e8-9658-0242ac1c0002.jpg') model.cfg = cfg result = inference_segmentor(model, img) plt.figure(figsize=(8, 6)) show_result_pyplot(model, img, result, palette)
code
129012879/cell_20
[ "image_output_1.png" ]
from PIL import Image from mmcv import Config from mmseg.apis import set_random_seed from mmseg.apis import train_segmentor from mmseg.datasets import build_dataset from mmseg.datasets.builder import DATASETS from mmseg.datasets.custom import CustomDataset from mmseg.models import build_segmentor import matplotlib.pyplot as plt import mmcv import numpy as np import os.path as osp import mmcv import matplotlib.pyplot as plt img = mmcv.imread('nails/images/2C29D473-CCB4-458C-926B-99D0042161E6.jpg') import os.path as osp import numpy as np from PIL import Image data_root = 'nails' img_dir = 'images' ann_dir = 'labels' classes = ('background', 'nails') palette = [[0, 0, 0], [225, 25, 38]] for file in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.jpg'): seg_img = Image.open(osp.join(data_root, ann_dir, file)).convert('P') seg_img.putpalette(np.array(palette, dtype=np.uint8)) seg_img.save(osp.join(data_root, ann_dir, file.replace('.jpg', '.png'))) split_dir = 'splits' mmcv.mkdir_or_exist(osp.join(data_root, split_dir)) filename_list = [osp.splitext(filename)[0] for filename in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.png')] with open(osp.join(data_root, split_dir, 'train.txt'), 'w') as f: train_length = int(len(filename_list) * 0.85) f.writelines((line + '\n' for line in filename_list[:train_length])) with open(osp.join(data_root, split_dir, 'val.txt'), 'w') as f: f.writelines((line + '\n' for line in filename_list[train_length:])) from mmseg.datasets.builder import DATASETS from mmseg.datasets.custom import CustomDataset @DATASETS.register_module() class NailsDataset(CustomDataset): CLASSES = classes PALETTE = palette def __init__(self, split, **kwargs): super().__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) assert osp.exists(self.img_dir) and self.split is not None from mmcv import Config cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py ') from mmseg.apis import set_random_seed cfg.norm_cfg = 
dict(type='BN', requires_grad=True) cfg.model.backbone.norm_cfg = cfg.norm_cfg cfg.model.decode_head.norm_cfg = cfg.norm_cfg cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg cfg.model.decode_head.num_classes = 2 cfg.model.auxiliary_head.num_classes = 2 cfg.model.decode_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.model.auxiliary_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.dataset_type = 'NailsDataset' cfg.data_root = data_root cfg.data.samples_per_gpu = 8 cfg.data.workers_per_gpu = 8 cfg.img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) cfg.crop_size = (256, 256) cfg.train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(320, 240), ratio_range=(0.5, 2.0)), dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_semantic_seg'])] cfg.test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(320, 240), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])] cfg.data.train.type = cfg.dataset_type cfg.data.train.data_root = cfg.data_root cfg.data.train.img_dir = img_dir cfg.data.train.ann_dir = ann_dir cfg.data.train.pipeline = cfg.train_pipeline cfg.data.train.split = 'splits/train.txt' cfg.data.val.type = cfg.dataset_type cfg.data.val.data_root = cfg.data_root cfg.data.val.img_dir = img_dir 
cfg.data.val.ann_dir = ann_dir cfg.data.val.pipeline = cfg.test_pipeline cfg.data.val.split = 'splits/val.txt' cfg.data.test.type = cfg.dataset_type cfg.data.test.data_root = cfg.data_root cfg.data.test.img_dir = img_dir cfg.data.test.ann_dir = ann_dir cfg.data.test.pipeline = cfg.test_pipeline cfg.data.test.split = 'splits/val.txt' cfg.load_from = 'checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200605_094614-3769eecf.pth' cfg.work_dir = './work_dirs/nails' cfg.optimizer.lr = 0.01 / 8 cfg.runner.max_iters = 600 cfg.log_config.interval = 10 cfg.evaluation.interval = 200 cfg.checkpoint_config.interval = 200 cfg.seed = 1111 set_random_seed(1111, deterministic=False) cfg.gpu_ids = range(1) from mmseg.datasets import build_dataset from mmseg.models import build_segmentor from mmseg.apis import train_segmentor datasets = [build_dataset(cfg.data.train)] model = build_segmentor(cfg.model) model.CLASSES = datasets[0].CLASSES mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) train_segmentor(model, datasets, cfg, distributed=False, validate=True, meta=dict())
code
129012879/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import mmseg import mmseg print(mmseg.__version__)
code
129012879/cell_2
[ "text_plain_output_1.png" ]
# Check nvcc version !nvcc -V # Check GCC version !gcc --version
code
129012879/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import mmcv import mmcv import matplotlib.pyplot as plt img = mmcv.imread('nails/images/2C29D473-CCB4-458C-926B-99D0042161E6.jpg') plt.figure(figsize=(8, 6)) plt.imshow(mmcv.bgr2rgb(img)) plt.show()
code
129012879/cell_19
[ "text_plain_output_1.png" ]
from PIL import Image from mmcv import Config from mmseg.apis import set_random_seed import matplotlib.pyplot as plt import mmcv import numpy as np import os.path as osp import mmcv import matplotlib.pyplot as plt img = mmcv.imread('nails/images/2C29D473-CCB4-458C-926B-99D0042161E6.jpg') import os.path as osp import numpy as np from PIL import Image data_root = 'nails' img_dir = 'images' ann_dir = 'labels' classes = ('background', 'nails') palette = [[0, 0, 0], [225, 25, 38]] for file in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.jpg'): seg_img = Image.open(osp.join(data_root, ann_dir, file)).convert('P') seg_img.putpalette(np.array(palette, dtype=np.uint8)) seg_img.save(osp.join(data_root, ann_dir, file.replace('.jpg', '.png'))) from mmcv import Config cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py ') from mmseg.apis import set_random_seed cfg.norm_cfg = dict(type='BN', requires_grad=True) cfg.model.backbone.norm_cfg = cfg.norm_cfg cfg.model.decode_head.norm_cfg = cfg.norm_cfg cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg cfg.model.decode_head.num_classes = 2 cfg.model.auxiliary_head.num_classes = 2 cfg.model.decode_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.model.auxiliary_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.dataset_type = 'NailsDataset' cfg.data_root = data_root cfg.data.samples_per_gpu = 8 cfg.data.workers_per_gpu = 8 cfg.img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) cfg.crop_size = (256, 256) cfg.train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(320, 240), ratio_range=(0.5, 2.0)), dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75), dict(type='RandomFlip', 
flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_semantic_seg'])] cfg.test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(320, 240), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])] cfg.data.train.type = cfg.dataset_type cfg.data.train.data_root = cfg.data_root cfg.data.train.img_dir = img_dir cfg.data.train.ann_dir = ann_dir cfg.data.train.pipeline = cfg.train_pipeline cfg.data.train.split = 'splits/train.txt' cfg.data.val.type = cfg.dataset_type cfg.data.val.data_root = cfg.data_root cfg.data.val.img_dir = img_dir cfg.data.val.ann_dir = ann_dir cfg.data.val.pipeline = cfg.test_pipeline cfg.data.val.split = 'splits/val.txt' cfg.data.test.type = cfg.dataset_type cfg.data.test.data_root = cfg.data_root cfg.data.test.img_dir = img_dir cfg.data.test.ann_dir = ann_dir cfg.data.test.pipeline = cfg.test_pipeline cfg.data.test.split = 'splits/val.txt' cfg.load_from = 'checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200605_094614-3769eecf.pth' cfg.work_dir = './work_dirs/nails' cfg.optimizer.lr = 0.01 / 8 cfg.runner.max_iters = 600 cfg.log_config.interval = 10 cfg.evaluation.interval = 200 cfg.checkpoint_config.interval = 200 cfg.seed = 1111 set_random_seed(1111, deterministic=False) cfg.gpu_ids = range(1) print(f'Config:\n{cfg.pretty_text}')
code
129012879/cell_1
[ "text_plain_output_1.png" ]
from google.colab import drive from google.colab import drive drive.mount('/content/drive')
code
129012879/cell_18
[ "text_plain_output_1.png" ]
from mmcv import Config from mmcv import Config cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py ') print(cfg.dataset_type)
code
129012879/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
!mkdir checkpoints # !wget https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth -P checkpoints !wget https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes/deeplabv3plus_r101-d8_512x1024_40k_cityscapes_20200605_094614-3769eecf.pth -P checkpoints
code
129012879/cell_15
[ "text_plain_output_1.png" ]
from PIL import Image import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np import os.path as osp import mmcv import matplotlib.pyplot as plt img = mmcv.imread('nails/images/2C29D473-CCB4-458C-926B-99D0042161E6.jpg') import os.path as osp import numpy as np from PIL import Image data_root = 'nails' img_dir = 'images' ann_dir = 'labels' classes = ('background', 'nails') palette = [[0, 0, 0], [225, 25, 38]] for file in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.jpg'): seg_img = Image.open(osp.join(data_root, ann_dir, file)).convert('P') seg_img.putpalette(np.array(palette, dtype=np.uint8)) seg_img.save(osp.join(data_root, ann_dir, file.replace('.jpg', '.png'))) import matplotlib.patches as mpatches img = Image.open('/content/mmsegmentation/nails/labels/09aefeec-e05f-11e8-87a6-0242ac1c0002.png') plt.figure(figsize=(8, 6)) im = plt.imshow(np.array(img.convert('RGB'))) patches = [mpatches.Patch(color=np.array(palette[i]) / 255.0, label=classes[i]) for i in range(2)] plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0, fontsize='large') plt.show()
code
129012879/cell_3
[ "text_plain_output_1.png" ]
import torch, torchvision print(torch.__version__, torch.cuda.is_available()) print(torchvision.__version__)
code
129012879/cell_22
[ "text_plain_output_1.png" ]
from PIL import Image from mmcv import Config from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot from mmseg.apis import set_random_seed from mmseg.apis import train_segmentor from mmseg.datasets import build_dataset from mmseg.datasets.builder import DATASETS from mmseg.datasets.custom import CustomDataset from mmseg.models import build_segmentor import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np import os.path as osp import mmcv import matplotlib.pyplot as plt img = mmcv.imread('nails/images/2C29D473-CCB4-458C-926B-99D0042161E6.jpg') import os.path as osp import numpy as np from PIL import Image data_root = 'nails' img_dir = 'images' ann_dir = 'labels' classes = ('background', 'nails') palette = [[0, 0, 0], [225, 25, 38]] for file in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.jpg'): seg_img = Image.open(osp.join(data_root, ann_dir, file)).convert('P') seg_img.putpalette(np.array(palette, dtype=np.uint8)) seg_img.save(osp.join(data_root, ann_dir, file.replace('.jpg', '.png'))) # Let's take a look at the segmentation map we got import matplotlib.patches as mpatches img = Image.open('/content/mmsegmentation/nails/labels/09aefeec-e05f-11e8-87a6-0242ac1c0002.png') plt.figure(figsize=(8, 6)) im = plt.imshow(np.array(img.convert('RGB'))) # create a patch (proxy artist) for every color patches = [mpatches.Patch(color=np.array(palette[i])/255., label=classes[i]) for i in range(2)] # put those patched as legend-handles into the legend plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize='large') plt.show() split_dir = 'splits' mmcv.mkdir_or_exist(osp.join(data_root, split_dir)) filename_list = [osp.splitext(filename)[0] for filename in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.png')] with open(osp.join(data_root, split_dir, 'train.txt'), 'w') as f: train_length = int(len(filename_list) * 0.85) f.writelines((line + '\n' for line in 
filename_list[:train_length])) with open(osp.join(data_root, split_dir, 'val.txt'), 'w') as f: f.writelines((line + '\n' for line in filename_list[train_length:])) from mmseg.datasets.builder import DATASETS from mmseg.datasets.custom import CustomDataset @DATASETS.register_module() class NailsDataset(CustomDataset): CLASSES = classes PALETTE = palette def __init__(self, split, **kwargs): super().__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) assert osp.exists(self.img_dir) and self.split is not None from mmcv import Config cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py ') from mmseg.apis import set_random_seed cfg.norm_cfg = dict(type='BN', requires_grad=True) cfg.model.backbone.norm_cfg = cfg.norm_cfg cfg.model.decode_head.norm_cfg = cfg.norm_cfg cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg cfg.model.decode_head.num_classes = 2 cfg.model.auxiliary_head.num_classes = 2 cfg.model.decode_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.model.auxiliary_head.loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=0.5), dict(type='DiceLoss', loss_name='loss_dice', loss_weight=0.5)] cfg.dataset_type = 'NailsDataset' cfg.data_root = data_root cfg.data.samples_per_gpu = 8 cfg.data.workers_per_gpu = 8 cfg.img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) cfg.crop_size = (256, 256) cfg.train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(320, 240), ratio_range=(0.5, 2.0)), dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255), dict(type='DefaultFormatBundle'), dict(type='Collect', 
keys=['img', 'gt_semantic_seg'])] cfg.test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(320, 240), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])] cfg.data.train.type = cfg.dataset_type cfg.data.train.data_root = cfg.data_root cfg.data.train.img_dir = img_dir cfg.data.train.ann_dir = ann_dir cfg.data.train.pipeline = cfg.train_pipeline cfg.data.train.split = 'splits/train.txt' cfg.data.val.type = cfg.dataset_type cfg.data.val.data_root = cfg.data_root cfg.data.val.img_dir = img_dir cfg.data.val.ann_dir = ann_dir cfg.data.val.pipeline = cfg.test_pipeline cfg.data.val.split = 'splits/val.txt' cfg.data.test.type = cfg.dataset_type cfg.data.test.data_root = cfg.data_root cfg.data.test.img_dir = img_dir cfg.data.test.ann_dir = ann_dir cfg.data.test.pipeline = cfg.test_pipeline cfg.data.test.split = 'splits/val.txt' cfg.load_from = 'checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200605_094614-3769eecf.pth' cfg.work_dir = './work_dirs/nails' cfg.optimizer.lr = 0.01 / 8 cfg.runner.max_iters = 600 cfg.log_config.interval = 10 cfg.evaluation.interval = 200 cfg.checkpoint_config.interval = 200 cfg.seed = 1111 set_random_seed(1111, deterministic=False) cfg.gpu_ids = range(1) from mmseg.datasets import build_dataset from mmseg.models import build_segmentor from mmseg.apis import train_segmentor datasets = [build_dataset(cfg.data.train)] model = build_segmentor(cfg.model) model.CLASSES = datasets[0].CLASSES mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) train_segmentor(model, datasets, cfg, distributed=False, validate=True, meta=dict()) img = mmcv.imread('/content/mmsegmentation/nails/images/09aefeec-e05f-11e8-87a6-0242ac1c0002.jpg') model.cfg = cfg result = inference_segmentor(model, img) plt.figure(figsize=(8, 6)) show_result_pyplot(model, img, result, palette)
code
129012879/cell_10
[ "text_plain_output_1.png" ]
!unzip /content/drive/MyDrive/nails.zip -d /content/mmsegmentation/nails
code
129012879/cell_12
[ "text_plain_output_1.png" ]
# Install tree first !apt-get -q install tree !tree nails
code
129012879/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
!rm -rf mmsegmentation !git clone https://github.com/open-mmlab/mmsegmentation.git !pip install -e .
code
1004507/cell_4
[ "image_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing import re # regular expressions raw_data = pd.read_csv('../input/train.csv') raw_data = shuffle(raw_data) gender_dic = {'male': 0, 'female': 1} raw_data.ix[:, 4] = raw_data.ix[:, 4].replace(gender_dic) raw_data.fillna(0, inplace=True) raw_data = raw_data.assign(Cabin_count=0) raw_data = raw_data.assign(Cabin_letter=0) raw_data = raw_data.assign(Cabin_number=0) def map_cabin(row): cabin_str = row[10] if cabin_str == 0: return row cabin_parts = cabin_str.split(' ') row[12] = len(cabin_parts) lc = cabin_parts[len(cabin_parts) - 1] cl = re.findall('[A-Z]', lc) cn = re.findall('\\d+', lc) if len(cl) > 0: row[13] = ord(cl[0]) else: row[13] = 0 if len(cn) > 0: row[14] = int(cn[0]) else: row[14] = 0 return row raw_data = raw_data.apply(map_cabin, axis=1) data_X = raw_data.ix[:, [2, 4, 5, 6, 7, 9, 12, 13, 14]] data_y = raw_data.ix[:, 1] poly = PolynomialFeatures(2) data_X = poly.fit_transform(data_X) msk = np.random.rand(len(data_X)) < 0.8 train_X = data_X[msk] train_y = data_y[msk] cv_X = data_X[~msk] cv_y = data_y[~msk] pd.DataFrame(data=data_X).head()
code
1004507/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt # plotting import numpy as np # linear algebra import pandas as pd # data processing import re # regular expressions raw_data = pd.read_csv('../input/train.csv') raw_data = shuffle(raw_data) gender_dic = {'male': 0, 'female': 1} raw_data.ix[:, 4] = raw_data.ix[:, 4].replace(gender_dic) raw_data.fillna(0, inplace=True) raw_data = raw_data.assign(Cabin_count=0) raw_data = raw_data.assign(Cabin_letter=0) raw_data = raw_data.assign(Cabin_number=0) def map_cabin(row): cabin_str = row[10] if cabin_str == 0: return row cabin_parts = cabin_str.split(' ') row[12] = len(cabin_parts) lc = cabin_parts[len(cabin_parts) - 1] cl = re.findall('[A-Z]', lc) cn = re.findall('\\d+', lc) if len(cl) > 0: row[13] = ord(cl[0]) else: row[13] = 0 if len(cn) > 0: row[14] = int(cn[0]) else: row[14] = 0 return row raw_data = raw_data.apply(map_cabin, axis=1) data_X = raw_data.ix[:, [2, 4, 5, 6, 7, 9, 12, 13, 14]] data_y = raw_data.ix[:, 1] poly = PolynomialFeatures(2) data_X = poly.fit_transform(data_X) msk = np.random.rand(len(data_X)) < 0.8 train_X = data_X[msk] train_y = data_y[msk] cv_X = data_X[~msk] cv_y = data_y[~msk] pca = PCA(n_components=2) t = pca.fit_transform(data_X) t_x, t_y = t.T plt.scatter(t_x, t_y, c=data_y) plt.show()
code
1004507/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import learning_curve import matplotlib.pyplot as plt # plotting import numpy as np # linear algebra import pandas as pd # data processing import re # regular expressions raw_data = pd.read_csv('../input/train.csv') raw_data = shuffle(raw_data) gender_dic = {'male': 0, 'female': 1} raw_data.ix[:, 4] = raw_data.ix[:, 4].replace(gender_dic) raw_data.fillna(0, inplace=True) raw_data = raw_data.assign(Cabin_count=0) raw_data = raw_data.assign(Cabin_letter=0) raw_data = raw_data.assign(Cabin_number=0) def map_cabin(row): cabin_str = row[10] if cabin_str == 0: return row cabin_parts = cabin_str.split(' ') row[12] = len(cabin_parts) lc = cabin_parts[len(cabin_parts) - 1] cl = re.findall('[A-Z]', lc) cn = re.findall('\\d+', lc) if len(cl) > 0: row[13] = ord(cl[0]) else: row[13] = 0 if len(cn) > 0: row[14] = int(cn[0]) else: row[14] = 0 return row raw_data = raw_data.apply(map_cabin, axis=1) data_X = raw_data.ix[:, [2, 4, 5, 6, 7, 9, 12, 13, 14]] data_y = raw_data.ix[:, 1] poly = PolynomialFeatures(2) data_X = poly.fit_transform(data_X) msk = np.random.rand(len(data_X)) < 0.8 train_X = data_X[msk] train_y = data_y[msk] cv_X = data_X[~msk] cv_y = data_y[~msk] pca = PCA(n_components=2) t = pca.fit_transform(data_X) t_x, t_y = t.T est = SVC(C=0.2) ts = [10, 25, 50, 100, 200, 300, 400, 445] train_sizes, train_scores, valid_scores = learning_curve(est, data_X, data_y, train_sizes=ts, cv=2) plt.plot(train_sizes, np.mean(train_scores, axis=1)) plt.plot(train_sizes, np.mean(valid_scores, axis=1)) plt.show()
code
1004507/cell_10
[ "text_html_output_1.png" ]
from sklearn.model_selection import learning_curve import matplotlib.pyplot as plt # plotting import numpy as np # linear algebra import pandas as pd # data processing import re # regular expressions raw_data = pd.read_csv('../input/train.csv') raw_data = shuffle(raw_data) gender_dic = {'male': 0, 'female': 1} raw_data.ix[:, 4] = raw_data.ix[:, 4].replace(gender_dic) raw_data.fillna(0, inplace=True) raw_data = raw_data.assign(Cabin_count=0) raw_data = raw_data.assign(Cabin_letter=0) raw_data = raw_data.assign(Cabin_number=0) def map_cabin(row): cabin_str = row[10] if cabin_str == 0: return row cabin_parts = cabin_str.split(' ') row[12] = len(cabin_parts) lc = cabin_parts[len(cabin_parts) - 1] cl = re.findall('[A-Z]', lc) cn = re.findall('\\d+', lc) if len(cl) > 0: row[13] = ord(cl[0]) else: row[13] = 0 if len(cn) > 0: row[14] = int(cn[0]) else: row[14] = 0 return row raw_data = raw_data.apply(map_cabin, axis=1) data_X = raw_data.ix[:, [2, 4, 5, 6, 7, 9, 12, 13, 14]] data_y = raw_data.ix[:, 1] poly = PolynomialFeatures(2) data_X = poly.fit_transform(data_X) msk = np.random.rand(len(data_X)) < 0.8 train_X = data_X[msk] train_y = data_y[msk] cv_X = data_X[~msk] cv_y = data_y[~msk] pca = PCA(n_components=2) t = pca.fit_transform(data_X) t_x, t_y = t.T est = SVC(C=0.2) ts = [10, 25, 50, 100, 200, 300, 400, 445] train_sizes, train_scores, valid_scores = learning_curve(est, data_X, data_y, train_sizes=ts, cv=2) est.fit(train_X, train_y) predict = est.predict(cv_X) compare_y = cv_y.as_matrix() sim = np.sum(predict == compare_y) / len(predict) print(sim, '%')
code
1008970/cell_6
[ "image_output_1.png" ]
silly_thresh_value = 55 thresh_image = bone_image > silly_thresh_value fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) ax1.imshow(bone_image, cmap='bone') ax1.set_title('Original Image') ax2.imshow(thresh_image, cmap='jet') ax2.set_title('Thresheld Image')
code
1008970/cell_3
[ "image_output_1.png" ]
from skimage.io import imread bone_image = imread('../input/bone.tif') print('Loading bone image shape: {}'.format(bone_image.shape))
code
1008970/cell_12
[ "text_plain_output_1.png" ]
threshold_list = [10, 20, 200] fig, m_ax = plt.subplots(2, len(threshold_list), figsize=(15, 6)) for c_thresh, (c_ax1, c_ax2) in zip(threshold_list, m_ax.T): bone_thresh = bone_image > c_thresh c_ax1.imshow(bone_thresh, cmap='jet') c_ax1.set_title('Bone @ {}, Image'.format(c_thresh)) c_ax1.axis('off') cell_thresh = bone_image < c_thresh c_ax2.imshow(cell_thresh, cmap='jet') c_ax2.set_title('Cell @ {}, Image'.format(c_thresh)) c_ax2.axis('off')
code
1008970/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) ax1.imshow(bone_image, cmap='bone') _ = ax2.hist(bone_image.ravel(), 20)
code
33122071/cell_25
[ "text_html_output_2.png" ]
from plotly.subplots import make_subplots import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objects as go import translators as ts df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True) ex_label = [] for i in range(df_new['annotation'].shape[0]): if df_new['annotation'][i]['labels'] == []: ex_label.append('no_emotion') else: ex_label.append(df_new.annotation[i]['labels'][0]) df = pd.DataFrame() df['content'] = df_new['content'] df['emotion'] = ex_label df['len'] = df['content'].astype(str).apply(len) df = df.drop_duplicates(subset='content', keep='first') df.shape def missing_value_of_data(data): total = data.isnull().sum().sort_values(ascending=False) percentage = round(total / data.shape[0] * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) missing_value_of_data(df) def count_values_in_column(data, feature): total = data.loc[:, feature].value_counts(dropna=False) percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) count_values_in_column(df, 'emotion') name_of_emotion = df.emotion.unique() value_of_emotion = list(df.emotion.value_counts()) fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)]) fig.update_traces(textposition='outside') fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide') def ngrams_top(corpus, ngram_range, n=None): """ List the top n words in a vocabulary according to occurrence in a text corpus. 
""" vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) total_list = words_freq[:n] df = pd.DataFrame(total_list, columns=['text', 'count']) return df _1gram = ngrams_top(df['content'], (1, 1), n=10) english_1gram = [] for i in range(10): english_1gram.append(ts.google(_1gram['text'][i], 'auto', 'en')) fig = make_subplots(rows=1, cols=2) fig.add_trace(go.Bar(x=_1gram['count'][::-1], y=_1gram['text'][::-1], name='Indonesian', marker_color='rgb(55, 83, 109)', orientation='h'), row=1, col=1) fig.add_trace(go.Bar(x=_1gram['count'][::-1], y=english_1gram[::-1], name='English', marker_color='rgb(5, 3, 243)', orientation='h'), row=1, col=2) fig.update_layout(height=600, width=800, title_text='1 grams for Indonesian/English') fig.show()
code
33122071/cell_23
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objects as go df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True) ex_label = [] for i in range(df_new['annotation'].shape[0]): if df_new['annotation'][i]['labels'] == []: ex_label.append('no_emotion') else: ex_label.append(df_new.annotation[i]['labels'][0]) df = pd.DataFrame() df['content'] = df_new['content'] df['emotion'] = ex_label df['len'] = df['content'].astype(str).apply(len) df = df.drop_duplicates(subset='content', keep='first') df.shape def missing_value_of_data(data): total = data.isnull().sum().sort_values(ascending=False) percentage = round(total / data.shape[0] * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) missing_value_of_data(df) def count_values_in_column(data, feature): total = data.loc[:, feature].value_counts(dropna=False) percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) count_values_in_column(df, 'emotion') name_of_emotion = df.emotion.unique() value_of_emotion = list(df.emotion.value_counts()) fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)]) fig.update_traces(textposition='outside') fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide') def ngrams_top(corpus, ngram_range, n=None): """ List the top n words in a vocabulary according to occurrence in a text corpus. """ vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) total_list = words_freq[:n] df = pd.DataFrame(total_list, columns=['text', 'count']) return df _1gram = ngrams_top(df['content'], (1, 1), n=10)
code
33122071/cell_29
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.subplots import make_subplots import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objects as go import translators as ts df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True) ex_label = [] for i in range(df_new['annotation'].shape[0]): if df_new['annotation'][i]['labels'] == []: ex_label.append('no_emotion') else: ex_label.append(df_new.annotation[i]['labels'][0]) df = pd.DataFrame() df['content'] = df_new['content'] df['emotion'] = ex_label df['len'] = df['content'].astype(str).apply(len) df = df.drop_duplicates(subset='content', keep='first') df.shape def missing_value_of_data(data): total = data.isnull().sum().sort_values(ascending=False) percentage = round(total / data.shape[0] * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) missing_value_of_data(df) def count_values_in_column(data, feature): total = data.loc[:, feature].value_counts(dropna=False) percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) count_values_in_column(df, 'emotion') name_of_emotion = df.emotion.unique() value_of_emotion = list(df.emotion.value_counts()) fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)]) fig.update_traces(textposition='outside') fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide') def ngrams_top(corpus, ngram_range, n=None): """ List the top n words in a vocabulary according to occurrence in a text corpus. 
""" vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) total_list = words_freq[:n] df = pd.DataFrame(total_list, columns=['text', 'count']) return df _1gram = ngrams_top(df['content'], (1, 1), n=10) english_1gram = [] for i in range(10): english_1gram.append(ts.google(_1gram['text'][i], 'auto', 'en')) fig = make_subplots(rows=1, cols=2) fig.add_trace( go.Bar(x=_1gram['count'][::-1], y=_1gram['text'][::-1], name='Indonesian', marker_color='rgb(55, 83, 109)', orientation='h' ), row=1, col=1 ) fig.add_trace( go.Bar(x=_1gram['count'][::-1], y=english_1gram[::-1], name='English', marker_color='rgb(5, 3, 243)', orientation='h' ), row=1, col=2 ) fig.update_layout(height=600, width=800, title_text="1 grams for Indonesian/English") fig.show() _2gram = ngrams_top(df['content'], (2, 2), n=10) english_2gram = [] for i in range(10): english_2gram.append(ts.google(_2gram['text'][i], 'auto', 'en')) fig = make_subplots(rows=1, cols=2) fig.add_trace(go.Bar(x=_2gram['count'][::-1], y=_2gram['text'][::-1], name='Indonesian', marker_color='rgb(55, 244, 181)', orientation='h'), row=1, col=1) fig.add_trace(go.Bar(x=_2gram['count'][::-1], y=english_2gram[::-1], name='English', marker_color='rgb(58, 45, 121)', orientation='h'), row=1, col=2) fig.update_layout(height=600, width=1400, title_text='2 grams for Indonesian/English') fig.show()
code
33122071/cell_19
[ "text_plain_output_1.png" ]
!pip install translators
code
33122071/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
33122071/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True) ex_label = [] for i in range(df_new['annotation'].shape[0]): if df_new['annotation'][i]['labels'] == []: ex_label.append('no_emotion') else: ex_label.append(df_new.annotation[i]['labels'][0]) df = pd.DataFrame() df['content'] = df_new['content'] df['emotion'] = ex_label df.describe()
code
33122071/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objects as go import translators as ts df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True) ex_label = [] for i in range(df_new['annotation'].shape[0]): if df_new['annotation'][i]['labels'] == []: ex_label.append('no_emotion') else: ex_label.append(df_new.annotation[i]['labels'][0]) df = pd.DataFrame() df['content'] = df_new['content'] df['emotion'] = ex_label df['len'] = df['content'].astype(str).apply(len) df = df.drop_duplicates(subset='content', keep='first') df.shape def missing_value_of_data(data): total = data.isnull().sum().sort_values(ascending=False) percentage = round(total / data.shape[0] * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) missing_value_of_data(df) def count_values_in_column(data, feature): total = data.loc[:, feature].value_counts(dropna=False) percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2) return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage']) count_values_in_column(df, 'emotion') name_of_emotion = df.emotion.unique() value_of_emotion = list(df.emotion.value_counts()) fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)]) fig.update_traces(textposition='outside') fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide') def ngrams_top(corpus, ngram_range, n=None): """ List the top n words in a vocabulary according to occurrence in a text corpus. 
""" vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) total_list = words_freq[:n] df = pd.DataFrame(total_list, columns=['text', 'count']) return df _1gram = ngrams_top(df['content'], (1, 1), n=10) english_1gram = [] for i in range(10): english_1gram.append(ts.google(_1gram['text'][i], 'auto', 'en')) _2gram = ngrams_top(df['content'], (2, 2), n=10) english_2gram = [] for i in range(10): english_2gram.append(ts.google(_2gram['text'][i], 'auto', 'en'))
code
33122071/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True) ex_label = [] for i in range(df_new['annotation'].shape[0]): if df_new['annotation'][i]['labels'] == []: ex_label.append('no_emotion') else: ex_label.append(df_new.annotation[i]['labels'][0]) df = pd.DataFrame() df['content'] = df_new['content'] df['emotion'] = ex_label df['len'] = df['content'].astype(str).apply(len) df.head()
code
33122071/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for row_idx in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][row_idx]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[row_idx]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)
df = df.drop_duplicates(subset='content', keep='first')
df.shape

def missing_value_of_data(data):
    """Per-column missing-value count and percentage, most-missing first."""
    total = data.isnull().sum().sort_values(ascending=False)
    percentage = (total / data.shape[0] * 100).round(2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

missing_value_of_data(df)

def count_values_in_column(data, feature):
    """Absolute and percentage value counts for one column (NaN included)."""
    total = data.loc[:, feature].value_counts(dropna=False)
    percentage = (data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100).round(2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

count_values_in_column(df, 'emotion')
code
33122071/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

# Read the line-delimited JSON tweet dump and preview the first rows.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)
df_new.head()
code
33122071/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import plotly.graph_objects as go

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for row_idx in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][row_idx]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[row_idx]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)
df = df.drop_duplicates(subset='content', keep='first')
df.shape

# Bar chart of the emotion class distribution.
name_of_emotion = df.emotion.unique()
value_of_emotion = list(df.emotion.value_counts())
fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
fig.show()
code
33122071/cell_31
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
import translators as ts
# BUG FIX: CountVectorizer was used below but never imported (NameError).
from sklearn.feature_extraction.text import CountVectorizer

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for i in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][i]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[i]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)
df = df.drop_duplicates(subset='content', keep='first')
df.shape

def missing_value_of_data(data):
    """Per-column missing-value count and percentage, most-missing first."""
    total = data.isnull().sum().sort_values(ascending=False)
    percentage = round(total / data.shape[0] * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

missing_value_of_data(df)

def count_values_in_column(data, feature):
    """Absolute and percentage value counts for one column (NaN included)."""
    total = data.loc[:, feature].value_counts(dropna=False)
    percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

count_values_in_column(df, 'emotion')

# Bar chart of the emotion class distribution.
name_of_emotion = df.emotion.unique()
value_of_emotion = list(df.emotion.value_counts())
fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')

def ngrams_top(corpus, ngram_range, n=None):
    """Return a DataFrame of the top `n` n-grams in `corpus`, sorted by frequency."""
    vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus)
    bag_of_words = vec.transform(corpus)
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
    words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
    total_list = words_freq[:n]
    df = pd.DataFrame(total_list, columns=['text', 'count'])
    return df

# Top uni/bi/tri-grams, machine-translated to English for readability.
_1gram = ngrams_top(df['content'], (1, 1), n=10)
english_1gram = []
for i in range(10):
    english_1gram.append(ts.google(_1gram['text'][i], 'auto', 'en'))

_2gram = ngrams_top(df['content'], (2, 2), n=10)
english_2gram = []
for i in range(10):
    english_2gram.append(ts.google(_2gram['text'][i], 'auto', 'en'))

_3gram = ngrams_top(df['content'], (3, 3), n=10)
english_3gram = []
for i in range(10):
    english_3gram.append(ts.google(_3gram['text'][i], 'auto', 'en'))
code
33122071/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
import translators as ts
# BUG FIX: CountVectorizer was used below but never imported (NameError).
from sklearn.feature_extraction.text import CountVectorizer

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for i in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][i]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[i]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)
df = df.drop_duplicates(subset='content', keep='first')
df.shape

def missing_value_of_data(data):
    """Per-column missing-value count and percentage, most-missing first."""
    total = data.isnull().sum().sort_values(ascending=False)
    percentage = round(total / data.shape[0] * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

missing_value_of_data(df)

def count_values_in_column(data, feature):
    """Absolute and percentage value counts for one column (NaN included)."""
    total = data.loc[:, feature].value_counts(dropna=False)
    percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

count_values_in_column(df, 'emotion')

# Bar chart of the emotion class distribution.
name_of_emotion = df.emotion.unique()
value_of_emotion = list(df.emotion.value_counts())
fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')

def ngrams_top(corpus, ngram_range, n=None):
    """Return a DataFrame of the top `n` n-grams in `corpus`, sorted by frequency."""
    vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus)
    bag_of_words = vec.transform(corpus)
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
    words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
    total_list = words_freq[:n]
    df = pd.DataFrame(total_list, columns=['text', 'count'])
    return df

# Top unigrams, machine-translated to English for readability.
_1gram = ngrams_top(df['content'], (1, 1), n=10)
english_1gram = []
for i in range(10):
    english_1gram.append(ts.google(_1gram['text'][i], 'auto', 'en'))
code
33122071/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for row_idx in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][row_idx]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[row_idx]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)

# Drop repeated tweet texts and show the resulting size.
df = df.drop_duplicates(subset='content', keep='first')
df.shape
code
33122071/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
# BUG FIX: CountVectorizer was used below but never imported (NameError).
from sklearn.feature_extraction.text import CountVectorizer

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for i in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][i]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[i]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)
df = df.drop_duplicates(subset='content', keep='first')
df.shape

def missing_value_of_data(data):
    """Per-column missing-value count and percentage, most-missing first."""
    total = data.isnull().sum().sort_values(ascending=False)
    percentage = round(total / data.shape[0] * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

missing_value_of_data(df)

def count_values_in_column(data, feature):
    """Absolute and percentage value counts for one column (NaN included)."""
    total = data.loc[:, feature].value_counts(dropna=False)
    percentage = round(data.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

count_values_in_column(df, 'emotion')

# Bar chart of the emotion class distribution.
name_of_emotion = df.emotion.unique()
value_of_emotion = list(df.emotion.value_counts())
fig = go.Figure([go.Bar(x=list(name_of_emotion), y=value_of_emotion)])
fig.update_traces(textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')

def ngrams_top(corpus, ngram_range, n=None):
    """Return a DataFrame of the top `n` n-grams in `corpus`, sorted by frequency."""
    vec = CountVectorizer(stop_words='english', ngram_range=ngram_range).fit(corpus)
    bag_of_words = vec.transform(corpus)
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
    words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
    total_list = words_freq[:n]
    df = pd.DataFrame(total_list, columns=['text', 'count'])
    return df

# Top bigrams in the corpus.
_2gram = ngrams_top(df['content'], (2, 2), n=10)
code
33122071/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for row_idx in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][row_idx]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[row_idx]['labels'][0])

df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df['len'] = df['content'].astype(str).apply(len)
df = df.drop_duplicates(subset='content', keep='first')
df.shape

def missing_value_of_data(data):
    """Per-column missing-value count and percentage, most-missing first."""
    total = data.isnull().sum().sort_values(ascending=False)
    percentage = (total / data.shape[0] * 100).round(2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])

missing_value_of_data(df)
code
33122071/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

# Load the line-delimited JSON tweet dump.
df_new = pd.read_json('/kaggle/input/indonesiandata/emotion_id_tweets.json', lines=True)

# First annotated label per tweet; unlabeled tweets become 'no_emotion'.
ex_label = []
for row_idx in range(df_new['annotation'].shape[0]):
    if df_new['annotation'][row_idx]['labels'] == []:
        ex_label.append('no_emotion')
    else:
        ex_label.append(df_new.annotation[row_idx]['labels'][0])

# Two-column frame: raw tweet text plus its extracted emotion label.
df = pd.DataFrame()
df['content'] = df_new['content']
df['emotion'] = ex_label
df.head()
code
34130888/cell_6
[ "text_plain_output_1.png" ]
import cv2
import os
import pandas as pd

# Images are split across two folders; remember which ids live in part 1.
in_part1 = set(os.listdir('../input/skin-cancer-mnist-ham10000/ham10000_images_part_1'))
df = pd.read_csv('../input/skin-cancer-mnist-ham10000/HAM10000_metadata.csv')

x_train = []
y_train = []
lesions = set()  # keep only the first image of each lesion_id
for index, row in df.iterrows():
    if row['lesion_id'] in lesions:
        continue
    lesions.add(row['lesion_id'])
    folder = 'ham10000_images_part_1/' if row['image_id'] + '.jpg' in in_part1 else 'ham10000_images_part_2/'
    path = '../input/skin-cancer-mnist-ham10000/' + folder + row['image_id'] + '.jpg'
    img = cv2.resize(cv2.imread(path), (100, 75))  # (width, height)
    x_train.append(img)
    y_train.append(row['dx'])

# One-hot encode the diagnosis labels and preview them.
y_train = pd.get_dummies(y_train)
y_train.head()
code
34130888/cell_11
[ "text_html_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential

input_shape = (75, 100, 3)  # height x width x RGB of the resized images
num_classes = 7             # one output unit per diagnosis category

# Plain fully-connected classifier over the flattened pixels.
model = Sequential([
    Flatten(input_shape=input_shape),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(128, activation='relu'),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(num_classes, activation='softmax'),
])
model.summary()
code
34130888/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
# Core numerics, data handling, and image I/O.
import numpy as np
import pandas as pd
import os
import cv2
# Keras model-building, training utilities, and augmentation.
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
# Train/validation splitting.
from sklearn.model_selection import train_test_split
code
34130888/cell_8
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os
import pandas as pd

# Images are split across two folders; remember which ids live in part 1.
in_part1 = set(os.listdir('../input/skin-cancer-mnist-ham10000/ham10000_images_part_1'))
df = pd.read_csv('../input/skin-cancer-mnist-ham10000/HAM10000_metadata.csv')

x_train = []
y_train = []
lesions = set()  # keep only the first image of each lesion_id
for index, row in df.iterrows():
    if row['lesion_id'] in lesions:
        continue
    lesions.add(row['lesion_id'])
    folder = 'ham10000_images_part_1/' if row['image_id'] + '.jpg' in in_part1 else 'ham10000_images_part_2/'
    path = '../input/skin-cancer-mnist-ham10000/' + folder + row['image_id'] + '.jpg'
    img = cv2.resize(cv2.imread(path), (100, 75))
    x_train.append(img)
    y_train.append(row['dx'])

# One-hot encode labels and standardize pixels to zero mean / unit variance.
y_train = pd.get_dummies(y_train)
y_train = np.asarray(y_train)
mean = np.mean(x_train)
std = np.std(x_train)
x_train = (x_train - mean) / std
print(x_train.shape)
print(y_train.shape)
code
34130888/cell_15
[ "text_plain_output_1.png" ]
from keras.callbacks import ReduceLROnPlateau
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
import cv2
import numpy as np
import os
import pandas as pd

# --- data loading: one image per lesion_id, resized to 100x75 --------------
in_part1 = set(os.listdir('../input/skin-cancer-mnist-ham10000/ham10000_images_part_1'))
df = pd.read_csv('../input/skin-cancer-mnist-ham10000/HAM10000_metadata.csv')
x_train = []
y_train = []
lesions = set()
for index, row in df.iterrows():
    if row['lesion_id'] in lesions:
        continue
    lesions.add(row['lesion_id'])
    folder = 'ham10000_images_part_1/' if row['image_id'] + '.jpg' in in_part1 else 'ham10000_images_part_2/'
    path = '../input/skin-cancer-mnist-ham10000/' + folder + row['image_id'] + '.jpg'
    img = cv2.resize(cv2.imread(path), (100, 75))
    x_train.append(img)
    y_train.append(row['dx'])
y_train = pd.get_dummies(y_train)
y_train = np.asarray(y_train)

# --- standardize pixels to zero mean / unit variance -----------------------
mean = np.mean(x_train)
std = np.std(x_train)
x_train = (x_train - mean) / std

# --- dense classifier ------------------------------------------------------
input_shape = (75, 100, 3)
num_classes = 7
model = Sequential([
    Flatten(input_shape=input_shape),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(128, activation='relu'),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(num_classes, activation='softmax'),
])
model.summary()

optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1,
                                            factor=0.5, min_lr=1e-05)

# Light augmentation to reduce overfitting.
datagen = ImageDataGenerator(rotation_range=10, zoom_range=0.1,
                             width_shift_range=0.1, height_shift_range=0.1)
datagen.fit(x_train)

epochs = 50
batch_size = 128
# NOTE(review): x_validate / y_validate are expected from an earlier cell.
history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(x_validate, y_validate),
                              verbose=1, callbacks=[learning_rate_reduction], shuffle=True)
code
16120004/cell_9
[ "text_plain_output_1.png" ]
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os

infectados = os.listdir('../input/cell_images/cell_images/Parasitized/')
saudaveis = os.listdir('../input/cell_images/cell_images/Uninfected/')
data = []
labels = []

# Parasitized cells: augment each image with two rotations and a blur.
for i in infectados:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Parasitized/' + i)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        blur = cv2.blur(np.array(resize_img), (10, 10))
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        data.append(np.array(blur))
        labels.append(1)
        labels.append(1)
        labels.append(1)
        labels.append(1)
    except AttributeError:
        pass  # unreadable file: cv2.imread returned None

# Healthy cells: two rotations, no blur.
for s in saudaveis:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Uninfected/' + s)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        labels.append(0)
        labels.append(0)
        labels.append(0)
    except AttributeError:
        pass

celulas = np.array(data)
classes = np.array(labels)

n = 0
for i in range(49):
    n += 1
    r = np.random.randint(0, celulas.shape[0], 1)
    (plt.xticks([]), plt.yticks([]))

# Side-by-side example of an infected vs a healthy cell.
plt.figure(1, figsize=(15, 7))
plt.subplot(1, 2, 1)
plt.imshow(celulas[0])
plt.title('Infectada')
(plt.xticks([]), plt.yticks([]))
plt.subplot(1, 2, 2)
plt.imshow(celulas[60000])
plt.title('Saudavel')
(plt.xticks([]), plt.yticks([]))
plt.show()
code
16120004/cell_4
[ "image_output_1.png" ]
import os

# Show the dataset's top-level folders (Parasitized / Uninfected).
print(os.listdir('../input/cell_images/cell_images'))
code
16120004/cell_6
[ "text_plain_output_1.png" ]
from PIL import Image
import cv2
import numpy as np
import os

infectados = os.listdir('../input/cell_images/cell_images/Parasitized/')
saudaveis = os.listdir('../input/cell_images/cell_images/Uninfected/')
data = []
labels = []

# Parasitized cells (label 1): original + 30deg/60deg rotations + blur.
for i in infectados:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Parasitized/' + i)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        blur = cv2.blur(np.array(resize_img), (10, 10))
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        data.append(np.array(blur))
        labels.append(1)
        labels.append(1)
        labels.append(1)
        labels.append(1)
    except AttributeError:
        print('')  # unreadable file: cv2.imread returned None

# Healthy cells (label 0): original + 30deg/60deg rotations.
for s in saudaveis:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Uninfected/' + s)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        labels.append(0)
        labels.append(0)
        labels.append(0)
    except AttributeError:
        print('')
code
16120004/cell_8
[ "text_plain_output_1.png" ]
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os

infectados = os.listdir('../input/cell_images/cell_images/Parasitized/')
saudaveis = os.listdir('../input/cell_images/cell_images/Uninfected/')
data = []
labels = []

# Parasitized cells (label 1): original + rotations + blur.
for i in infectados:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Parasitized/' + i)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        blur = cv2.blur(np.array(resize_img), (10, 10))
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        data.append(np.array(blur))
        labels.append(1)
        labels.append(1)
        labels.append(1)
        labels.append(1)
    except AttributeError:
        pass  # unreadable file: cv2.imread returned None

# Healthy cells (label 0): original + rotations.
for s in saudaveis:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Uninfected/' + s)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        labels.append(0)
        labels.append(0)
        labels.append(0)
    except AttributeError:
        pass

celulas = np.array(data)
classes = np.array(labels)

# 7x7 grid of random samples, titled with their class.
plt.figure(1, figsize=(15, 9))
n = 0
for i in range(49):
    n += 1
    r = np.random.randint(0, celulas.shape[0], 1)
    plt.subplot(7, 7, n)
    plt.subplots_adjust(hspace=0.5, wspace=0.5)
    plt.imshow(celulas[r[0]])
    plt.title('{} : {}'.format('Infectados' if classes[r[0]] == 1 else 'Saudaveis', classes[r[0]]))
    (plt.xticks([]), plt.yticks([]))
plt.show()
code
16120004/cell_15
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.models import Sequential

# Small CNN for 50x50 RGB cell images; two-unit sigmoid output.
model = Sequential([
    Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50, 50, 3)),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'),
    MaxPooling2D(pool_size=2),
    Conv2D(filters=128, kernel_size=2, padding='same', activation='relu'),
    MaxPooling2D(pool_size=2),
    Dropout(0.2),
    Flatten(),
    Dense(500, activation='relu'),
    Dropout(0.2),
    Dense(2, activation='sigmoid'),
])
model.summary()
code
16120004/cell_3
[ "image_output_1.png" ]
# Image handling, numerics, and filesystem access.
from PIL import Image
import numpy as np
import os
import cv2
# Keras model-building utilities for the CNN classifier.
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
code
16120004/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np
import os

infectados = os.listdir('../input/cell_images/cell_images/Parasitized/')
saudaveis = os.listdir('../input/cell_images/cell_images/Uninfected/')
data = []
labels = []

# Parasitized cells (label 1): original + rotations + blur.
for i in infectados:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Parasitized/' + i)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        blur = cv2.blur(np.array(resize_img), (10, 10))
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        data.append(np.array(blur))
        labels.append(1)
        labels.append(1)
        labels.append(1)
        labels.append(1)
    except AttributeError:
        pass  # unreadable file: cv2.imread returned None

# Healthy cells (label 0): original + rotations.
for s in saudaveis:
    try:
        image = cv2.imread('../input/cell_images/cell_images/Uninfected/' + s)
        image_array = Image.fromarray(image, 'RGB')
        resize_img = image_array.resize((50, 50))
        rotated30 = resize_img.rotate(30)
        rotated60 = resize_img.rotate(60)
        data.append(np.array(resize_img))
        data.append(np.array(rotated30))
        data.append(np.array(rotated60))
        labels.append(0)
        labels.append(0)
        labels.append(0)
    except AttributeError:
        pass

celulas = np.array(data)
classes = np.array(labels)

n = 0
for i in range(49):
    n += 1
    r = np.random.randint(0, celulas.shape[0], 1)
    (plt.xticks([]), plt.yticks([]))

# Shuffle deterministically, scale pixels to [0, 1], and one-hot the labels.
np.random.seed(0)
n = np.arange(celulas.shape[0])
np.random.shuffle(n)
cells = celulas[n]
labels = classes[n]
cells = cells.astype(np.float32)
labels = labels.astype(np.int32)
cells = cells / 255
num_classes = len(np.unique(labels))
len_data = len(cells)
# NOTE(review): y_tr / y_val / y_test / X_tr / X_val come from a split in an earlier cell.
y_tr = keras.utils.to_categorical(y_tr, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_tr.shape, y_val.shape, X_tr.shape, X_val.shape)
code
1004704/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import tree

# Train a default decision tree and report accuracy on the held-out split.
# NOTE(review): xtrain/ytrain/xtest/ytest come from an earlier cell.
clf = tree.DecisionTreeClassifier()
clf.fit(xtrain, ytrain)
clf.score(xtest, ytest)
code
1004704/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import tree

# Fit a default decision tree on the prepared training split.
# NOTE(review): xtrain/ytrain come from an earlier cell.
clf = tree.DecisionTreeClassifier()
clf.fit(xtrain, ytrain)
code
16136701/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Box plot to eyeball windspeed outliers.
dataf.windspeed.plot(kind='box')
code
16136701/cell_9
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd

# Load the bike-sharing data, then drop duplicate rows in place and
# confirm none remain.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
code
16136701/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
dataf.corr()

# Features: drop temp (correlated with atemp) and the leakage columns.
train_X = dataf.drop(columns=['temp', 'casual', 'registered'])

# 5-fold cross-validated MSE for a plain linear regression.
# NOTE(review): train_Y comes from an earlier cell.
model = LinearRegression()
a = cross_val_score(model, train_X, train_Y, cv=5, scoring='neg_mean_squared_error')
code
16136701/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

# Load the bike-sharing data and show column dtypes / non-null counts.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.info()
code
16136701/cell_30
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
dataf.corr()

# Features and the three candidate targets.
train_X = dataf.drop(columns=['temp', 'casual', 'registered'])
train1_Y = dataf['casual']
train2_Y = dataf['registered']
train3_Y = dataf['count']

# Fit on the casual-rider target, then refit on the notebook's split.
# NOTE(review): X_train/Y_train come from an earlier cell.
model = LinearRegression()
model.fit(train_X, train1_Y)
dtrain_predictions = model.predict(train_X)
model.fit(X_train, Y_train)
code
16136701/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

# Load the bike-sharing data and count duplicate rows.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
code
16136701/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
dataf.corr()

train_X = dataf.drop(columns=['temp', 'casual', 'registered'])

# 5-fold CV, then convert negative MSE scores to a mean RMSE.
# NOTE(review): train_Y comes from an earlier cell.
model = LinearRegression()
a = cross_val_score(model, train_X, train_Y, cv=5, scoring='neg_mean_squared_error')
np.mean(np.sqrt(np.abs(a)))
code
16136701/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Distinct temperature readings.
dataf['temp'].unique()
code
16136701/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

# List the files available under the input directory.
print(os.listdir('../input'))
code
16136701/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

# Load the bike-sharing data, summarize it, and report its shape.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
code
16136701/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Pairwise correlations between the numeric columns.
dataf.corr()
code
16136701/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Box plot to eyeball outliers among registered riders.
dataf.registered.plot(kind='box')
code
16136701/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Box plot of the target column (total rentals).
dataf['count'].plot(kind='box')
code
16136701/cell_38
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
dataf.corr()

train_X = dataf.drop(columns=['temp', 'casual', 'registered'])

# One pairplot per feature against the rental count.
for col in dataf.columns:
    sns.pairplot(data=dataf, x_vars=col, y_vars='count')
code
16136701/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

# Load the bike-sharing data and preview the first rows.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.head()
code
16136701/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Frequency of each rental-count value.
dataf['count'].value_counts()
code
16136701/cell_35
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, r2_score
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
dataf.corr()

# Features and the three candidate targets.
train_X = dataf.drop(columns=['temp', 'casual', 'registered'])
train1_Y = dataf['casual']
train2_Y = dataf['registered']
train3_Y = dataf['count']

# Fit on casual riders, then refit on the notebook's split and report MAE.
# NOTE(review): X_train/Y_train/X_test/Y_test come from an earlier cell.
model = LinearRegression()
model.fit(train_X, train1_Y)
dtrain_predictions = model.predict(train_X)
model.fit(X_train, Y_train)
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
print('Train:', mean_absolute_error(Y_train, train_predict))
print('Test:', mean_absolute_error(Y_test, test_predict))
code
16136701/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression

# Instantiate an ordinary least-squares regressor; the bare expression
# echoes its repr in the notebook output.
model = LinearRegression()
model
code
16136701/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()

# Box plot to eyeball outliers among casual riders.
dataf.casual.plot(kind='box')
code
16136701/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data, then count
# missing values per column.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
code
16136701/cell_12
[ "text_html_output_1.png" ]
# Box plot of the season column (relies on `dataf` loaded in an earlier cell).
dataf.season.plot(kind='box')
code
16136701/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

# Load the bike-sharing data and show transposed summary statistics.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
code
16136701/cell_36
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, r2_score
import pandas as pd

# Load, summarize, and deduplicate the bike-sharing data.
dataf = pd.read_csv('../input/bike_share.csv')
dataf.describe().T
dataf.duplicated().sum()
dataf.shape
dataf.drop_duplicates(inplace=True)
dataf.duplicated().sum()
dataf.isna().sum()
dataf.corr()

# Features and the three candidate targets.
train_X = dataf.drop(columns=['temp', 'casual', 'registered'])
train1_Y = dataf['casual']
train2_Y = dataf['registered']
train3_Y = dataf['count']

# Fit on casual riders, then refit on the notebook's split and report R^2.
# NOTE(review): X_train/Y_train/X_test/Y_test come from an earlier cell.
model = LinearRegression()
model.fit(train_X, train1_Y)
dtrain_predictions = model.predict(train_X)
model.fit(X_train, Y_train)
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
print('r2 train', r2_score(Y_train, train_predict))
print('r2 test', r2_score(Y_test, test_predict))
code
74052344/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np

# Show up to 50 rows when displaying DataFrames.
# BUG FIX: the original wrote `pd.set_option.max_rows = 50`, which only set
# an attribute on the function object and had no effect on display options.
pd.set_option('display.max_rows', 50)

train_path = '/kaggle/input/house-prices-advanced-regression-techniques/train.csv'
test_path = '/kaggle/input/house-prices-advanced-regression-techniques/test.csv'
df_train = pd.read_csv(train_path)
df_test = pd.read_csv(test_path)
print('shape of train set', df_train.shape)
print('shape of test set ', df_test.shape)

# BUG FIX: the original printed `list_col_test` before it was defined
# (NameError) and labeled it as the train features; define both column
# lists first, then print each under its own label.
list_col_train = list(df_train.columns)
list_col_test = list(df_test.columns)
print('Train features : ', list_col_train)
print('test Features :', list_col_test)
code
74052344/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code