laureBe committed on
Commit 0f3f04a · verified · 1 Parent(s): 93dcf23

Update tasks/text.py

Files changed (1)
  1. tasks/text.py +26 -19
tasks/text.py CHANGED
@@ -9,9 +9,13 @@ import pandas as pd
  import tensorflow as tf
  from transformers import DistilBertTokenizer
  from transformers import TFDistilBertForSequenceClassification
- from tensorflow.keras.models import load_model
+ from transformers import logging
+ logging.set_verbosity_error()
+ logging.set_verbosity_warning()
  from .utils.evaluation import TextEvaluationRequest
  from .utils.emissions import tracker, clean_emissions_data, get_space_info
+ import os
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

  router = APIRouter()

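This hunk drops the unused `load_model` import and quiets library logging instead. Note that `logging.set_verbosity_warning()` runs after `logging.set_verbosity_error()`, so the warning level is what actually takes effect. A minimal sketch of the same quieting, assuming reducing log noise is the only goal here (one verbosity call is enough, and TF_CPP_MIN_LOG_LEVEL is best set before TensorFlow is imported):

    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'   # hide TensorFlow INFO messages

    from transformers import logging as hf_logging
    hf_logging.set_verbosity_error()           # keep only errors from transformers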
@@ -65,13 +69,14 @@ async def evaluate_text(request: TextEvaluationRequest):
  test_dataset["subsource"])], columns=['quote','source', 'subsource'])
  tt.fillna("",inplace=True)
  tn.fillna("",inplace=True)
+
  tn['text'] = tn[['quote', 'source','subsource']].agg(' '.join, axis=1)
  tt['text'] = tn[['quote', 'source','subsource']].agg(' '.join, axis=1)

  def clean_text(x):
- pattern = r'[^a-zA-z0-9\s]'
- text = re.sub(pattern, '', x)
- return x
+ pattern = r'[^a-zA-z0-9\s]'
+ text = re.sub(pattern, '', x)
+ return x

  def clean_numbers(x):
  if bool(re.search(r'\d', x)):
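As re-added, `clean_text` still computes `text` but returns the untouched `x`, and the character class `[^a-zA-z0-9\s]` uses `A-z`, a range that also covers the punctuation between `Z` and `a` in ASCII. (The `tt['text']` line above also builds the test column from `tn`'s columns, which looks like a copy-paste slip.) A hedged fix for the cleaner, assuming the intent is to keep only letters, digits, and whitespace:

    import re

    def clean_text(x):
        # Assumed intent: strip everything except letters, digits and whitespace.
        pattern = r'[^a-zA-Z0-9\s]'   # A-Z rather than A-z (A-z also matches [ \ ] ^ _ `)
        return re.sub(pattern, '', x)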
@@ -82,26 +87,34 @@ async def evaluate_text(request: TextEvaluationRequest):
  return x

  contraction_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have"}
+
  def _get_contractions(contraction_dict):
  contraction_re = re.compile('(%s)' % '|'.join(contraction_dict.keys()))
  return contraction_dict, contraction_re
+
  contractions, contractions_re = _get_contractions(contraction_dict)
+
  def replace_contractions(text):
  def replace(match):
  return contractions[match.group(0)]
  return contractions_re.sub(replace, text)
+
  train_dataset_df = tn['quote'].apply(lambda x: x.lower())
  test_dataset_df = tt['quote'].apply(lambda x: x.lower())
+
  # Clean the text
  train_dataset_df = train_dataset_df.apply(lambda x: clean_text(x))
  test_dataset_df= test_dataset_df.apply(lambda x: clean_text(x))
+
  # Clean numbers
  train_dataset_df= train_dataset_df.apply(lambda x: clean_numbers(x))
  test_dataset_df = test_dataset_df.apply(lambda x: clean_numbers(x))
+
  # Clean Contractions
  train_dataset_df = train_dataset_df.apply(lambda x: replace_contractions(x))
  test_dataset_df = test_dataset_df.apply(lambda x: replace_contractions(x))
-
+
+ # Encoding
  y_train_df=pd.DataFrame(train_dataset['label'], columns=['label'])
  y_test_df=pd.DataFrame(test_dataset['label'], columns=['label'])
  y_train_encoded = y_train_df['label'].astype('category').cat.codes
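The contraction helpers compile a single alternation over the dictionary keys and substitute each match; because the quotes are lowercased first, only the lowercase keys can match. A self-contained usage sketch with a tiny illustrative dictionary (the real contraction_dict above is much larger):

    import re

    contraction_dict = {"won't": "will not", "they've": "they have"}

    def _get_contractions(contraction_dict):
        contraction_re = re.compile('(%s)' % '|'.join(contraction_dict.keys()))
        return contraction_dict, contraction_re

    contractions, contractions_re = _get_contractions(contraction_dict)

    def replace_contractions(text):
        def replace(match):
            return contractions[match.group(0)]
        return contractions_re.sub(replace, text)

    print(replace_contractions("they've said it won't rain"))
    # they have said it will not rain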
@@ -109,11 +122,12 @@ async def evaluate_text(request: TextEvaluationRequest):
  train_labels = y_train_encoded.to_list()
  test_labels=y_test_encoded.to_list()

+ # Tokenize
  tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
  train_encodings = tokenizer(train_dataset_df.to_list(), truncation=True, padding=True)
  val_encodings = tokenizer(test_dataset_df.to_list(), truncation=True, padding=True)

-
+ # Slicing
  train_dataset_bert = tf.data.Dataset.from_tensor_slices((
  dict(train_encodings),
  train_labels
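The tokenizer call returns a dict of `input_ids` and `attention_mask` lists, and `tf.data.Dataset.from_tensor_slices` pairs each encoded example with its label, which is what the new "# Tokenize" and "# Slicing" steps do. A minimal self-contained sketch of that path (the two sentences and labels are made up):

    import tensorflow as tf
    from transformers import DistilBertTokenizer

    texts = ["glaciers are melting faster", "solar activity explains the warming"]
    labels = [0, 1]

    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
    encodings = tokenizer(texts, truncation=True, padding=True)

    dataset = tf.data.Dataset.from_tensor_slices((dict(encodings), labels))
    for features, label in dataset.take(1):
        print(features['input_ids'].shape, label.numpy())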
@@ -123,12 +137,13 @@ async def evaluate_text(request: TextEvaluationRequest):
  test_labels
  ))

+ model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=8)
+

  # Start tracking emissions
  tracker.start()
  tracker.start_task("inference")
- model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=8)
-
+
  optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5, epsilon=1e-08)
  model.compile(optimizer=optimizer, loss=model.hf_compute_loss, metrics=['accuracy'])
  #--------------------------------------------------------------------------------------------
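Moving the model construction above `tracker.start()` keeps loading the pretrained weights out of the tracked "inference" task; the compile step itself is unchanged. A sketch of that step, assuming the `num_labels=8` carried over from the code is the intended class count:

    import tensorflow as tf
    from transformers import TFDistilBertForSequenceClassification

    model = TFDistilBertForSequenceClassification.from_pretrained(
        'distilbert-base-uncased', num_labels=8)   # 8 classes assumed, as in the diff
    optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5, epsilon=1e-08)
    model.compile(optimizer=optimizer, loss=model.hf_compute_loss, metrics=['accuracy'])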
@@ -140,7 +155,7 @@ async def evaluate_text(request: TextEvaluationRequest):

  early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)

- model.fit(train_dataset_bert.shuffle(1000).batch(16),epochs=3,batch_size=16,validation_data=val_dataset_bert.shuffle(1000).batch(16),callbacks=[early_stopping])
+ model.fit(train_dataset_bert.shuffle(1000).batch(16),epochs=2,batch_size=16,validation_data=val_dataset_bert.shuffle(1000).batch(16),callbacks=[early_stopping])
  #--------------------------------------------------------------------------------------------
  # YOUR MODEL INFERENCE STOPS HERE
  #--------------------------------------------------------------------------------------------
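The fit call goes from 3 epochs to 2. With only 2 epochs, `EarlyStopping(patience=3)` can never trigger, and passing `batch_size=16` alongside datasets already batched with `.batch(16)` is redundant (recent Keras versions reject that argument for dataset inputs). A hedged sketch of an equivalent call without the redundant argument; the lower `patience=1` is an assumption so the callback can actually fire within 2 epochs:

    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', patience=1, restore_best_weights=True)

    model.fit(
        train_dataset_bert.shuffle(1000).batch(16),
        validation_data=val_dataset_bert.batch(16),   # shuffling validation data is unnecessary
        epochs=2,
        callbacks=[early_stopping],
    )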
@@ -149,22 +164,14 @@ async def evaluate_text(request: TextEvaluationRequest):
  # Stop tracking emissions
  emissions_data = tracker.stop_task()

-
- save_directory = "BERT" # Change this to your preferred location
-
- model.save_pretrained(save_directory)
- tokenizer.save_pretrained(save_directory)
- save_directory = "BERT"
- loaded_tokenizer = DistilBertTokenizer.from_pretrained(save_directory)
- loaded_model = TFDistilBertForSequenceClassification.from_pretrained(save_directory)

  # Calculate accuracy
  def predict_category(text):
- predict_input = loaded_tokenizer.encode(text,
+ predict_input =tokenizer.encode(text,
  truncation=True,
  padding=True,
  return_tensors="tf")
- output = loaded_model(predict_input)[0]
+ output = model(predict_input)[0]
  prediction_value = tf.argmax(output, axis=1).numpy()[0]
  return prediction_value
  # - - - - - - - - - - - - - - - - - - - - - - - - - - -
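The reworked `predict_category` reuses the in-memory `tokenizer` and `model` instead of saving them to the "BERT" directory and reloading, but it still encodes and classifies one quote at a time. A hedged sketch of the same prediction done batch-wise; the helper name `predict_categories` is made up:

    import tensorflow as tf

    def predict_categories(texts, tokenizer, model, batch_size=32):
        # Batched variant of predict_category, assuming the tokenizer/model
        # pair built earlier in this file.
        predictions = []
        for start in range(0, len(texts), batch_size):
            batch = texts[start:start + batch_size]
            enc = tokenizer(batch, truncation=True, padding=True, return_tensors='tf')
            logits = model(enc).logits
            predictions.extend(tf.argmax(logits, axis=1).numpy().tolist())
        return predictions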