path: stringlengths (13-17)
screenshot_names: listlengths (1-873)
code: stringlengths (0-40.4k)
cell_type: stringclasses (1 value)
121149216/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv') mo.shape mo.columns mo.dtypes mo.isnull().sum() viz_data = mo.copy(True) viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False) pvt = mo.pivot_table(index='model_name', values='sellers_amount', aggfunc=['min', 'mean', 'max', 'sum', 'std', 'count']) pvt
code
121149216/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv') mo.shape mo.columns mo.dtypes mo.isnull().sum() viz_data = mo.copy(True) viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False) mo['best_price'].max()
code
121149216/cell_37
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) mo = pd.read_csv('../input/ukrainian-market-mobile-phones-data/phones_data.csv') mo.shape mo.columns mo.dtypes mo.isnull().sum() viz_data = mo.copy(True) viz_data['release_date'].value_counts(normalize=True).sort_values(ascending=False) viz_data['release_date'].value_counts() viz_data['release_date'].hist() plt.xlabel('Year') plt.ylabel('Number of releases') plt.show()
code
49127755/cell_4
[ "text_plain_output_1.png" ]
import tensorflow as tf model = tf.keras.models.load_model('../input/cassava-abhinay/model') model.summary()
code
49127755/cell_6
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255) test_dir = '../input/cassava-leaf-disease-classification/test_images/' test = pd.DataFrame() test['image_id'] = os.listdir('../input/cassava-leaf-disease-classification/test_images/') test_generator = test_datagen_v2.flow_from_dataframe(test, directory=test_dir, x_col='image_id', target_size=(448, 448), batch_size=1, class_mode=None, shuffle=False)
code
49127755/cell_8
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf model = tf.keras.models.load_model('../input/cassava-abhinay/model') model.summary() test_datagen_v2 = ImageDataGenerator(rescale=1.0 / 255) test_dir = '../input/cassava-leaf-disease-classification/test_images/' test = pd.DataFrame() test['image_id'] = os.listdir('../input/cassava-leaf-disease-classification/test_images/') test_generator = test_datagen_v2.flow_from_dataframe(test, directory=test_dir, x_col='image_id', target_size=(448, 448), batch_size=1, class_mode=None, shuffle=False) labels = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4} test_generator.reset() pred = model.predict(test_generator, verbose=1, steps=len(test)) predicted_class_indices = np.argmax(pred, axis=1) labels = {v: k for k, v in labels.items()} predictions = [labels[k] for k in predicted_class_indices] filenames = test_generator.filenames results = pd.DataFrame({'image_id': filenames, 'label': predictions})
code
130011540/cell_21
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20)
code
130011540/cell_25
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) arr_2_d[3, 4]
code
130011540/cell_4
[ "text_plain_output_1.png" ]
import numpy as np print(np.__version__)
code
130011540/cell_34
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d)
code
130011540/cell_23
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) print(arr_2_d)
code
130011540/cell_40
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d) arr = np.arange(1, 11) bool_arr = arr < 5 result_arr = arr[bool_arr] result_arr = arr[arr < 5] print(result_arr)
code
130011540/cell_29
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) arr_2_d[:3, 1:2]
code
130011540/cell_39
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d) arr = np.arange(1, 11) bool_arr = arr < 5 result_arr = arr[bool_arr] print(result_arr)
code
130011540/cell_19
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10)
code
130011540/cell_45
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d) arr = np.arange(1, 11) data = np.zeros((8, 8), dtype=int) for i in range(8): for j in range(8): if i % 2 == 0: if j % 2 == 0: data[i, j] = 1 elif j % 2: data[i, j] = 1 data = np.random.randint(1, 5, size=(10, 10)) print(data) print() print('Min:', np.min(data)) print('Max:', np.max(data))
code
130011540/cell_18
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100
code
130011540/cell_32
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1)
code
130011540/cell_8
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) print(arr_10to50)
code
130011540/cell_16
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25)
code
130011540/cell_38
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d) arr = np.arange(1, 11) bool_arr = arr < 5 print(bool_arr)
code
130011540/cell_43
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d) arr = np.arange(1, 11) data = np.zeros((8, 8), dtype=int) for i in range(8): for j in range(8): if i % 2 == 0: if j % 2 == 0: data[i, j] = 1 elif j % 2: data[i, j] = 1 print(data)
code
130011540/cell_31
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d)
code
130011540/cell_14
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1)
code
130011540/cell_10
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) print(arr_10to50_even)
code
130011540/cell_27
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) arr_2_d[:3, 1:2]
code
130011540/cell_37
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3) np.random.rand(1) np.random.randn(25) np.arange(1, 101).reshape(10, 10) / 100 np.linspace(0.01, 1, 100).reshape(10, 10) np.linspace(0, 1, 20) arr_2_d = np.arange(1, 26).reshape(5, 5) np.sum(arr_2_d) np.sum(arr_2_d, axis=0) np.sum(arr_2_d, axis=1) np.std(arr_2_d) arr = np.arange(1, 11) print(arr)
code
130011540/cell_12
[ "text_plain_output_1.png" ]
import numpy as np arr_10to50 = np.arange(10, 51) arr_10to50_even = np.arange(10, 51, 2) np.arange(1, 10).reshape(3, 3)
code
50234943/cell_9
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor(n_estimators=1000, random_state=42) rf.fit(train_features, train_labels)
code
50234943/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd features = pd.read_csv('../input/tempscsv/temps.csv') features = pd.get_dummies(features) features.iloc[:, 5:].head(5)
code
50234943/cell_2
[ "text_html_output_1.png" ]
import pandas as pd features = pd.read_csv('../input/tempscsv/temps.csv') print('The shape of our features is:', features.shape)
code
50234943/cell_11
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor import numpy as np import pandas as pd features = pd.read_csv('../input/tempscsv/temps.csv') features = pd.get_dummies(features) labels = np.array(features['actual']) features = features.drop('actual', axis=1) feature_list = list(features.columns) features = np.array(features) rf = RandomForestRegressor(n_estimators=1000, random_state=42) rf.fit(train_features, train_labels) predictions = rf.predict(test_features) errors = abs(predictions - test_labels) mape = 100 * (errors / test_labels) accuracy = 100 - np.mean(mape) print('Accuracy:', round(accuracy, 2), '%.')
code
50234943/cell_1
[ "text_html_output_1.png" ]
import pandas as pd features = pd.read_csv('../input/tempscsv/temps.csv') features.head(5)
code
50234943/cell_7
[ "text_plain_output_1.png" ]
print('Training Features Shape:', train_features.shape) print('Training Labels Shape:', train_labels.shape) print('Testing Features Shape:', test_features.shape) print('Testing Labels Shape:', test_labels.shape)
code
50234943/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd features = pd.read_csv('../input/tempscsv/temps.csv') features.describe()
code
50234943/cell_10
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor import numpy as np import pandas as pd features = pd.read_csv('../input/tempscsv/temps.csv') features = pd.get_dummies(features) labels = np.array(features['actual']) features = features.drop('actual', axis=1) feature_list = list(features.columns) features = np.array(features) rf = RandomForestRegressor(n_estimators=1000, random_state=42) rf.fit(train_features, train_labels) predictions = rf.predict(test_features) errors = abs(predictions - test_labels) print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
code
130016222/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datasets import load_dataset, Features, Value, Dataset from transformers import pipeline import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dftrn = pd.read_csv('../input/nlp-getting-started/train.csv') dftes = pd.read_csv('../input/nlp-getting-started/test.csv') dfsub = pd.read_csv('../input/nlp-getting-started/sample_submission.csv') dstrn = Dataset.from_pandas(dftrn, split='train') dstes = Dataset.from_pandas(dftes, split='test') clsfr = pipeline('text-classification', device=0) restrn = clsfr(dstrn['text']) pd.DataFrame(restrn)['label'].apply(lambda x: 1 if x == 'NEGATIVE' else 0).value_counts() (pd.DataFrame(restrn)['label'].apply(lambda x: 1 if x == 'NEGATIVE' else 0) == dstrn['target']).sum()
code
130016222/cell_2
[ "text_plain_output_1.png" ]
!ls ../input/nlp-getting-started
code
130016222/cell_11
[ "text_plain_output_1.png" ]
from datasets import load_dataset, Features, Value, Dataset from transformers import pipeline import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dftrn = pd.read_csv('../input/nlp-getting-started/train.csv') dftes = pd.read_csv('../input/nlp-getting-started/test.csv') dfsub = pd.read_csv('../input/nlp-getting-started/sample_submission.csv') dstrn = Dataset.from_pandas(dftrn, split='train') dstes = Dataset.from_pandas(dftes, split='test') clsfr = pipeline('text-classification', device=0) restrn = clsfr(dstrn['text']) pd.DataFrame(restrn)['label'].apply(lambda x: 1 if x == 'NEGATIVE' else 0).value_counts()
code
130016222/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
130016222/cell_18
[ "text_plain_output_1.png" ]
!ls
code
130016222/cell_8
[ "text_plain_output_1.png" ]
from transformers import pipeline clsfr = pipeline('text-classification', device=0)
code
130016222/cell_15
[ "text_plain_output_1.png" ]
from datasets import load_dataset, Features, Value, Dataset from transformers import pipeline import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dftrn = pd.read_csv('../input/nlp-getting-started/train.csv') dftes = pd.read_csv('../input/nlp-getting-started/test.csv') dfsub = pd.read_csv('../input/nlp-getting-started/sample_submission.csv') dstrn = Dataset.from_pandas(dftrn, split='train') dstes = Dataset.from_pandas(dftes, split='test') clsfr = pipeline('text-classification', device=0) restrn = clsfr(dstrn['text']) restes = clsfr(dstes['text']) pd.DataFrame(restrn)['label'].apply(lambda x: 1 if x == 'NEGATIVE' else 0).value_counts() (pd.DataFrame(restrn)['label'].apply(lambda x: 1 if x == 'NEGATIVE' else 0) == dstrn['target']).sum() preds = pd.DataFrame(restes)['label'].apply(lambda x: 1 if x == 'NEGATIVE' else 0) preds
code
130016222/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd from datasets import load_dataset, Features, Value, Dataset from transformers import pipeline import numpy as np import torch
code
130016222/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datasets import load_dataset, Features, Value, Dataset from transformers import pipeline import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dftrn = pd.read_csv('../input/nlp-getting-started/train.csv') dftes = pd.read_csv('../input/nlp-getting-started/test.csv') dfsub = pd.read_csv('../input/nlp-getting-started/sample_submission.csv') dstrn = Dataset.from_pandas(dftrn, split='train') dstes = Dataset.from_pandas(dftes, split='test') clsfr = pipeline('text-classification', device=0) restrn = clsfr(dstrn['text']) len(restrn)
code
106201490/cell_21
[ "text_plain_output_1.png" ]
from vncorenlp import VnCoreNLP import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin') def format_sentence(sentence): sentence = re.sub(f'[{string.punctuation}\\d\n]', '', sentence) tokens = vncorenlp.tokenize(sentence.lower())[0] sentence = ' '.join(tokens) embedding_sentence = cbow_pretrain.get_sentence_vector(sentence) return embedding_sentence """ get the text column from the dataframe: df[column_name]: pandas.core.series.Series df[column_name].values: numpy.ndarray """ corpus = df['text'].values.tolist() corpus = list(map(format_sentence, corpus)) print('Num_samples:', len(corpus))
code
106201490/cell_9
[ "text_plain_output_1.png" ]
from vncorenlp import VnCoreNLP import numpy as np # linear algebra vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') print(vncorenlp_example) print(np.array(vncorenlp_example).shape)
code
106201490/cell_25
[ "text_plain_output_1.png" ]
from vncorenlp import VnCoreNLP import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin') def format_sentence(sentence): sentence = re.sub(f'[{string.punctuation}\\d\n]', '', sentence) tokens = vncorenlp.tokenize(sentence.lower())[0] sentence = ' '.join(tokens) embedding_sentence = cbow_pretrain.get_sentence_vector(sentence) return embedding_sentence """ get the text column from the dataframe: df[column_name]: pandas.core.series.Series df[column_name].values: numpy.ndarray """ corpus = df['text'].values.tolist() corpus = list(map(format_sentence, corpus)) X = np.array(corpus) Config.word_vec = X.shape[1] max_len_sent = max([len(sent) for sent in corpus]) Config.max_sequence_length = max_len_sent print(Config.max_sequence_length)
code
106201490/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) print(df.shape) df.head()
code
106201490/cell_34
[ "text_plain_output_1.png" ]
from sklearn.neural_network import MLPClassifier (X_train.shape, y_train.shape, X_test.shape, y_test.shape) mlp_classifier = MLPClassifier(learning_rate_init=0.0001, random_state=0, max_iter=300, hidden_layer_sizes=(256, 64)).fit(X_train, y_train) print('Evaluate:') print(f'train score: {mlp_classifier.score(X_train, y_train)}') print(f'test score: {mlp_classifier.score(X_test, y_test)}')
code
106201490/cell_23
[ "text_plain_output_1.png" ]
!pip install tqdm
code
106201490/cell_33
[ "text_plain_output_1.png" ]
from sklearn.neural_network import MLPClassifier (X_train.shape, y_train.shape, X_test.shape, y_test.shape) mlp_classifier = MLPClassifier(learning_rate_init=0.0001, random_state=0, max_iter=300, hidden_layer_sizes=(256, 64)).fit(X_train, y_train)
code
106201490/cell_6
[ "text_plain_output_1.png" ]
with open('../input/vietnamese-stopwords/vietnamese-stopwords.txt', 'r') as f: stop_words = f.read().split('\n') token_stop_words = ['_'.join(stop_word.split()) for stop_word in stop_words] print(token_stop_words[:10])
code
106201490/cell_39
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import Perceptron (X_train.shape, y_train.shape, X_test.shape, y_test.shape) perceptron = Perceptron(tol=2e-05, random_state=0) perceptron.fit(X_train, y_train) print('Evaluate:') print(f'train score: {perceptron.score(X_train, y_train)}') print(f'test score: {perceptron.score(X_test, y_test)}')
code
106201490/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) labels = df['label'].values print(labels.shape)
code
106201490/cell_2
[ "text_plain_output_1.png" ]
import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 print(Config.seed)
code
106201490/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
106201490/cell_7
[ "text_plain_output_1.png" ]
# !pip3 install underthesea !pip install vncorenlp
code
106201490/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) print(df.head())
code
106201490/cell_32
[ "text_plain_output_1.png" ]
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
code
106201490/cell_28
[ "text_plain_output_1.png" ]
import collections import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) labels = df['label'].values value_labels = list(set(labels)) count_labels = collections.Counter(labels) observe_data = pd.DataFrame({'value_labels': value_labels, 'num_labels': [count_labels[value] for value in value_labels]}) fig = plt.figure() ax = fig.add_axes([0.5, 0.5, 1, 1]) plt.bar(observe_data.value_labels, observe_data.num_labels, color='green') plt.title('Label counts in the dataset') plt.show()
code
106201490/cell_16
[ "text_html_output_1.png", "text_plain_output_1.png" ]
!pip install underthesea
code
106201490/cell_38
[ "text_plain_output_1.png" ]
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
code
106201490/cell_3
[ "text_plain_output_1.png" ]
import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 print(string.punctuation)
code
106201490/cell_17
[ "text_plain_output_1.png" ]
from underthesea import word_tokenize from vncorenlp import VnCoreNLP import numpy as np # linear algebra import re import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin') def format_sentence(sentence): sentence = re.sub(f'[{string.punctuation}\\d\n]', '', sentence) tokens = vncorenlp.tokenize(sentence.lower())[0] sentence = ' '.join(tokens) embedding_sentence = cbow_pretrain.get_sentence_vector(sentence) return embedding_sentence '\nDifferences between vncorenlp and underthesea:\n - Shape\n - vncorenlp joins tokens with an underscore\n' print(vncorenlp.tokenize('thậm chí ở đó chỉ có dây cáp nguồn')) print(word_tokenize('thậm chí ở đó chỉ có dây cáp nguồn'))
code
106201490/cell_35
[ "text_plain_output_1.png" ]
from tqdm import tqdm from vncorenlp import VnCoreNLP import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin') def format_sentence(sentence): sentence = re.sub(f'[{string.punctuation}\\d\n]', '', sentence) tokens = vncorenlp.tokenize(sentence.lower())[0] sentence = ' '.join(tokens) embedding_sentence = cbow_pretrain.get_sentence_vector(sentence) return embedding_sentence """ get the text column from the dataframe: df[column_name]: pandas.core.series.Series df[column_name].values: numpy.ndarray """ corpus = df['text'].values.tolist() corpus = list(map(format_sentence, corpus)) X = np.array(corpus) Config.word_vec = X.shape[1] labels = df['label'].values Y = [] X = [] for idx in tqdm(range(len(labels))): if labels[idx] == 0 or labels[idx] == 2: Y.append(labels[idx]) X.append(corpus[idx])
code
106201490/cell_14
[ "text_plain_output_1.png" ]
with open('../input/vietnamese-stopwords/vietnamese-stopwords.txt', 'r') as f: stop_words = f.read().split('\n') token_stop_words = ['_'.join(stop_word.split()) for stop_word in stop_words] print(token_stop_words[:10])
code
106201490/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from vncorenlp import VnCoreNLP import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin') def format_sentence(sentence): sentence = re.sub(f'[{string.punctuation}\\d\n]', '', sentence) tokens = vncorenlp.tokenize(sentence.lower())[0] sentence = ' '.join(tokens) embedding_sentence = cbow_pretrain.get_sentence_vector(sentence) return embedding_sentence """ get the text column from the dataframe: df[column_name]: pandas.core.series.Series df[column_name].values: numpy.ndarray """ corpus = df['text'].values.tolist() corpus = list(map(format_sentence, corpus)) X = np.array(corpus) Config.word_vec = X.shape[1] print(Config.word_vec)
code
106201490/cell_27
[ "text_plain_output_1.png" ]
import collections import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) labels = df['label'].values value_labels = list(set(labels)) print(value_labels) import collections count_labels = collections.Counter(labels) observe_data = pd.DataFrame({'value_labels': value_labels, 'num_labels': [count_labels[value] for value in value_labels]}) print(observe_data)
code
106201490/cell_12
[ "text_plain_output_1.png" ]
import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin')
code
106201490/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
with open('../input/vietnamese-stopwords/vietnamese-stopwords.txt', 'r') as f: stop_words = f.read().split('\n') print(len(stop_words)) print(stop_words[:10])
code
106201490/cell_36
[ "image_output_1.png" ]
from tqdm import tqdm from vncorenlp import VnCoreNLP import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import string class Config: max_sequence_length = -1 word_vec = -1 punctuations = f'[{string.punctuation}\\d\n]' seed = 44 data_path = '../input/vietnamese-text-classification-dataset/train.csv' df = pd.read_csv(data_path, header=None, names=['label', 'text']) with open('../input/vietnamese-stopwords/vietnamese-stopwords.txt', 'r') as f: stop_words = f.read().split('\n') vncorenlp = VnCoreNLP('../input/vncorenlp/VnCoreNLP-1.1.1.jar', annotators='wseg,pos,ner,parse', max_heap_size='-Xmx2g') vncorenlp_example = vncorenlp.tokenize('nắng chiếu lung linh trên hoa vàng') import fasttext.util cbow_pretrain = fasttext.load_model('../input/cbow-model/cc.vi.300.bin') def format_sentence(sentence): sentence = re.sub(f'[{string.punctuation}\\d\n]', '', sentence) tokens = vncorenlp.tokenize(sentence.lower())[0] sentence = ' '.join(tokens) embedding_sentence = cbow_pretrain.get_sentence_vector(sentence) return embedding_sentence """ get the text column from the dataframe: df[column_name]: pandas.core.series.Series df[column_name].values: numpy.ndarray """ corpus = df['text'].values.tolist() corpus = list(map(format_sentence, corpus)) X = np.array(corpus) Config.word_vec = X.shape[1] max_len_sent = max([len(sent) for sent in corpus]) Config.max_sequence_length = max_len_sent labels = df['label'].values class GenData: def __init__(self, stop_words, num_sentence, list_sentence): self.stop_words = stop_words self.num_sentence = num_sentence self.list_sentence = list_sentence def __gen_new_item__(self): num_sentence_gen = len(self.list_sentence) - self.num_sentence np.random.seed(Config.seed) np.random.shuffle(self.stop_words) stop_words_gen = self.stop_words[:num_sentence_gen] np.random.shuffle(self.list_sentence) sentence_gen = self.list_sentence[:num_sentence_gen] new_sentences = [[stop_words_gen[idx]] + sentence_gen[idx] for idx in range(num_sentence_gen)] return new_sentences Y = [] X = [] for idx in tqdm(range(len(labels))): if labels[idx] == 0 or labels[idx] == 2: Y.append(labels[idx]) X.append(corpus[idx]) X = np.array(X) Y = np.array(Y) (X.shape, Y.shape)
code
74054244/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() data_zom.rename(columns={'Known_For2': 'Restaurant speciality', 'Known_For22': 'Customer feedback'}, inplace=True) data_zom = data_zom.drop(['Latitude', 'Longitude'], axis=1) data_zom.isnull().sum() data_zom.fillna(data_zom.select_dtypes(include='object').mode().iloc[0], inplace=True) data_zom = data_zom.fillna(data_zom.mean()) plt.hist(data_zom['Dining_Rating']) plt.title('Average rating') plt.show()
code
74054244/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() data_zom.rename(columns={'Known_For2': 'Restaurant speciality', 'Known_For22': 'Customer feedback'}, inplace=True) data_zom = data_zom.drop(['Latitude', 'Longitude'], axis=1) data_zom['Locality'].value_counts()
code
74054244/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() print('The numerical features are ', num_fea) print('The categorical features are ', cat_fea)
code
74054244/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() data_zom.rename(columns={'Known_For2': 'Restaurant speciality', 'Known_For22': 'Customer feedback'}, inplace=True) data_zom.head()
code
74054244/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() data_zom.rename(columns={'Known_For2': 'Restaurant speciality', 'Known_For22': 'Customer feedback'}, inplace=True) data_zom = data_zom.drop(['Latitude', 'Longitude'], axis=1) data_zom.isnull().sum() data_zom.fillna(data_zom.select_dtypes(include='object').mode().iloc[0], inplace=True) data_zom = data_zom.fillna(data_zom.mean()) plt.hist(data_zom['Pricing_for_2']) plt.title('Average price for two') plt.show()
code
74054244/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
74054244/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') data_zom.info()
code
74054244/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() data_zom.rename(columns={'Known_For2': 'Restaurant speciality', 'Known_For22': 'Customer feedback'}, inplace=True) data_zom = data_zom.drop(['Latitude', 'Longitude'], axis=1) data_zom.isnull().sum()
code
74054244/cell_17
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') num_fea = data_zom.select_dtypes(include=['int', 'float']).columns.to_list() cat_fea = data_zom.select_dtypes(include='object').columns.to_list() data_zom.rename(columns={'Known_For2': 'Restaurant speciality', 'Known_For22': 'Customer feedback'}, inplace=True) data_zom = data_zom.drop(['Latitude', 'Longitude'], axis=1) data_zom.isnull().sum() data_zom.fillna(data_zom.select_dtypes(include='object').mode().iloc[0], inplace=True) data_zom = data_zom.fillna(data_zom.mean()) data_zom['Pricing_for_2'].describe()
code
74054244/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_zom = pd.read_csv('/kaggle/input/zomato-restaurants-in-delhi-ncr/DelhiNCR Restaurants.csv') print('Size of dataset is: ', data_zom.shape) data_zom.head()
code
34150823/cell_9
[ "text_plain_output_1.png" ]
import os import cv2 from PIL import Image import time import copy import warnings import random import numpy as np import pandas as pd from tqdm import tqdm_notebook as tqdm from torch.optim.lr_scheduler import ReduceLROnPlateau from sklearn.model_selection import train_test_split import torch import torch.nn as nn from torch.nn import functional as F import torchvision import torch.optim as optim import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader, Dataset, sampler from matplotlib import pyplot as plt import torchvision.transforms as transforms from albumentations import HorizontalFlip, VerticalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise, RandomRotate90, Transpose, RandomBrightnessContrast, RandomCrop from albumentations.pytorch import ToTensor import albumentations as albu import matplotlib.image as mpi from sklearn.metrics import f1_score warnings.filterwarnings('ignore') seed = 69 random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device
code
34150823/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.shape
code
34150823/cell_20
[ "text_plain_output_1.png" ]
import os import cv2 from PIL import Image import time import copy import warnings import random import numpy as np import pandas as pd from tqdm import tqdm_notebook as tqdm from torch.optim.lr_scheduler import ReduceLROnPlateau from sklearn.model_selection import train_test_split import torch import torch.nn as nn from torch.nn import functional as F import torchvision import torch.optim as optim import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader, Dataset, sampler from matplotlib import pyplot as plt import torchvision.transforms as transforms from albumentations import HorizontalFlip, VerticalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise, RandomRotate90, Transpose, RandomBrightnessContrast, RandomCrop from albumentations.pytorch import ToTensor import albumentations as albu import matplotlib.image as mpi from sklearn.metrics import f1_score warnings.filterwarnings('ignore') seed = 69 random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.shape device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device testdata = DogandCat(test_df, phase='val') testloader = DataLoader(testdata, batch_size=16) from torchvision import models resnet = models.resnet18(pretrained=True, progress=True) for param in resnet.parameters(): param.requires_grad = False fc_inputs = resnet.fc.in_features resnet.fc = nn.Linear(fc_inputs, 37) from torch.optim import lr_scheduler criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(resnet.fc.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001) scheduler = ReduceLROnPlateau(optimizer, factor=0.33, mode='min', patience=2) resnet.load_state_dict(torch.load('/kaggle/input/oxford-iiit/best_weights.pth')) resnet.to(device) corrects = 0 since = time.time() with torch.no_grad(): for inputs, labels in testloader: inputs = inputs.to(device) labels = labels.to(device) output = resnet(inputs) _, pred = torch.max(output, 1) corrects += torch.sum(pred == labels.data) accuracy = corrects.cpu().numpy() / len(testloader.dataset) time_elapsed = time.time() - since print('Accuracy: {:4f}'.format(accuracy))
code
34150823/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.shape classes = labels['breed'].unique() classes
code
34150823/cell_19
[ "text_html_output_1.png" ]
import os import cv2 from PIL import Image import time import copy import warnings import random import numpy as np import pandas as pd from tqdm import tqdm_notebook as tqdm from torch.optim.lr_scheduler import ReduceLROnPlateau from sklearn.model_selection import train_test_split import torch import torch.nn as nn from torch.nn import functional as F import torchvision import torch.optim as optim import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader, Dataset, sampler from matplotlib import pyplot as plt import torchvision.transforms as transforms from albumentations import HorizontalFlip, VerticalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise, RandomRotate90, Transpose, RandomBrightnessContrast, RandomCrop from albumentations.pytorch import ToTensor import albumentations as albu import matplotlib.image as mpi from sklearn.metrics import f1_score warnings.filterwarnings('ignore') seed = 69 random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.shape device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device testdata = DogandCat(test_df, phase='val') testloader = DataLoader(testdata, batch_size=16) from torchvision import models resnet = models.resnet18(pretrained=True, progress=True) for param in resnet.parameters(): param.requires_grad = False fc_inputs = resnet.fc.in_features resnet.fc = nn.Linear(fc_inputs, 37) from torch.optim import lr_scheduler criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(resnet.fc.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001) scheduler = ReduceLROnPlateau(optimizer, factor=0.33, mode='min', patience=2) resnet.load_state_dict(torch.load('/kaggle/input/oxford-iiit/best_weights.pth')) resnet.to(device) corrects = 0 since = time.time() with torch.no_grad(): for inputs, labels in testloader: inputs = inputs.to(device) labels = labels.to(device) output = resnet(inputs) _, pred = torch.max(output, 1) corrects += torch.sum(pred == labels.data) accuracy = corrects.cpu().numpy() / len(testloader.dataset) print() time_elapsed = time.time() - since print('Run complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
code
34150823/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.shape df1 = labels['breed'] df2 = labels['label_id'] df1 = pd.get_dummies(df1) df = pd.concat([df2, df1], axis=1) df.head()
code
34150823/cell_18
[ "text_plain_output_1.png" ]
import os import cv2 from PIL import Image import time import copy import warnings import random import numpy as np import pandas as pd from tqdm import tqdm_notebook as tqdm from torch.optim.lr_scheduler import ReduceLROnPlateau from sklearn.model_selection import train_test_split import torch import torch.nn as nn from torch.nn import functional as F import torchvision import torch.optim as optim import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader, Dataset, sampler from matplotlib import pyplot as plt import torchvision.transforms as transforms from albumentations import HorizontalFlip, VerticalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise, RandomRotate90, Transpose, RandomBrightnessContrast, RandomCrop from albumentations.pytorch import ToTensor import albumentations as albu import matplotlib.image as mpi from sklearn.metrics import f1_score warnings.filterwarnings('ignore') seed = 69 random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device from torchvision import models resnet = models.resnet18(pretrained=True, progress=True) for param in resnet.parameters(): param.requires_grad = False fc_inputs = resnet.fc.in_features resnet.fc = nn.Linear(fc_inputs, 37) from torch.optim import lr_scheduler criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(resnet.fc.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0.001) scheduler = ReduceLROnPlateau(optimizer, factor=0.33, mode='min', patience=2) resnet.load_state_dict(torch.load('/kaggle/input/oxford-iiit/best_weights.pth')) resnet.to(device)
code
34150823/cell_15
[ "text_plain_output_1.png" ]
from torchvision import models resnet = models.resnet18(pretrained=True, progress=True)
code
34150823/cell_3
[ "image_output_1.png" ]
import pandas as pd labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.head()
code
34150823/cell_14
[ "text_html_output_1.png" ]
import os import cv2 from PIL import Image import time import copy import warnings import random import numpy as np import pandas as pd from tqdm import tqdm_notebook as tqdm from torch.optim.lr_scheduler import ReduceLROnPlateau from sklearn.model_selection import train_test_split import torch import torch.nn as nn from torch.nn import functional as F import torchvision import torch.optim as optim import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader, Dataset, sampler from matplotlib import pyplot as plt import torchvision.transforms as transforms from albumentations import HorizontalFlip, VerticalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise, RandomRotate90, Transpose, RandomBrightnessContrast, RandomCrop from albumentations.pytorch import ToTensor import albumentations as albu import matplotlib.image as mpi from sklearn.metrics import f1_score warnings.filterwarnings('ignore') seed = 69 random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True labels = pd.read_csv('/kaggle/input/oxford-iiit/labels') labels.shape df1 = labels['breed'] df2 = labels['label_id'] df1 = pd.get_dummies(df1) df = pd.concat([df2, df1], axis=1) class DogandCat(Dataset): def __init__(self, df, phase): self.phase = phase self.df = df if phase == 'train': self.transforms = albu.Compose([albu.SmallestMaxSize(256), albu.RandomCrop(256, 256), albu.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ToTensor()]) elif phase == 'val': self.transforms = albu.Compose([albu.Resize(256, 256), albu.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ToTensor()]) def __len__(self): return len(self.df) def __getitem__(self, index): label = self.df.iloc[index, 1:] label = label.to_numpy() label = np.argmax(label) path = self.df.iloc[index, 0] img = plt.imread(path) img = self.transforms(image=np.array(img)) img = img['image'] return (img, label) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) def im_show(img): npimg = img.numpy().transpose((1, 2, 0)) * std + mean npimg = np.clip(npimg, 0.0, 1.0) plt.imshow(npimg) fig = plt.figure(figsize=(18, 5)) for i in np.arange(16): ax = fig.add_subplot(2, 8, i + 1, xticks=[], yticks=[]) im_show(img[i])
code
32065763/cell_9
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1') alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None) alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'}) id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None) id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'}) data.HS.value_counts() data.Abusive.value_counts()
code
32065763/cell_2
[ "text_plain_output_1.png" ]
!pip install PySastrawi
code
32065763/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1') alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None) alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'}) id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None) id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'}) data.HS.value_counts() data.Abusive.value_counts() print('Shape: ', data.shape) data.head(15)
code
32065763/cell_7
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1') alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None) alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'}) id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None) id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'}) print('Shape: ', data.shape) data.head(15)
code
32065763/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1') alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None) alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'}) id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None) id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'}) data.HS.value_counts()
code
32065763/cell_16
[ "text_plain_output_1.png" ]
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory import pandas as pd import re data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1') alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None) alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'}) id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None) id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'}) factory = StemmerFactory() stemmer = factory.create_stemmer() def lowercase(text): return text.lower() def remove_unnecessary_char(text): text = re.sub('\n', ' ', text) text = re.sub('rt', ' ', text) text = re.sub('user', ' ', text) text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+)|(http?://[^\\s]+))', ' ', text) text = re.sub(' +', ' ', text) return text def remove_nonalphanumeric(text): text = re.sub('[^0-9a-zA-Z]+', ' ', text) return text alay_dict_map = dict(zip(alay_dict['original'], alay_dict['replacement'])) def normalize_alay(text): return ' '.join([alay_dict_map[word] if word in alay_dict_map else word for word in text.split(' ')]) def remove_stopword(text): text = ' '.join(['' if word in id_stopword_dict.stopword.values else word for word in text.split(' ')]) text = re.sub(' +', ' ', text) text = text.strip() return text def stemming(text): return stemmer.stem(text) print('remove_nonalphanumeric: ', remove_nonalphanumeric('Halooo,,,,, duniaa!!')) print('lowercase: ', lowercase('Halooo, duniaa!')) print('stemming: ', stemming('Perekonomian Indonesia sedang dalam pertumbuhan yang membanggakan')) print('remove_unnecessary_char: ', remove_unnecessary_char('Hehe\n\n RT USER USER apa kabs www.google.com\n hehe')) print('normalize_alay: ', normalize_alay('aamiin adek abis')) print('remove_stopword: ', remove_stopword('ada hehe adalah huhu yang hehe'))
code
32065763/cell_3
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np import pandas as pd !ls '../input'
code
32065763/cell_14
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/data.csv', encoding='latin-1') alay_dict = pd.read_csv('../input/indonesian-abusive-and-hate-speech-twitter-text/new_kamusalay.csv', encoding='latin-1', header=None) alay_dict = alay_dict.rename(columns={0: 'original', 1: 'replacement'}) id_stopword_dict = pd.read_csv('../input/indonesian-stoplist/stopwordbahasa.csv', header=None) id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'}) print('Shape: ', id_stopword_dict.shape) id_stopword_dict.head()
code