Jainesh212 committed on
Commit 3bb2e98 · 1 Parent(s): 6187777

Update app.py

Files changed (1)
  1. app.py +49 -217
app.py CHANGED
@@ -1,227 +1,59 @@
  import streamlit as st
  import pandas as pd
- import numpy as np
  import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torch.utils.data import Dataset, DataLoader
- from transformers import AutoTokenizer,AutoModel
- import random
- from bs4 import BeautifulSoup
- import re

- from transformers import AutoModelForSequenceClassification
- import pytorch_lightning as pl

- device = "cuda:0" if torch.cuda.is_available() else "cpu"

- train_path = "train.csv"
- test_path = "test.csv"
- test_labels_paths = "test_labels.csv"
- test_df = pd.read_csv(test_path)
- test_labels_df = pd.read_csv(test_labels_paths)
- test_df = pd.concat([test_df.iloc[:, 1], test_labels_df.iloc[:, 1:]], axis = 1)
- test_df.to_csv("test-dataset.csv")
- test_dataset_path = "test-dataset.csv"

- def healthy_filter(df):
-     if (df["toxic"]==0) and (df["severe_toxic"]==0) and (df["obscene"]==0) and (df["threat"]==0) and (df["insult"]==0) and (df["identity_hate"]==0):
-         return 1
-     else:
-         return 0
-
- attributes = ['toxic', 'severe_toxic', 'obscene', 'threat',
-               'insult', 'identity_hate', 'healthy']
-
- class Comments_Dataset(Dataset):
-     def __init__(self, data_path, tokenizer, attributes, max_token_len = 128, sample=5000):
-         self.data_path = data_path
-         self.tokenizer = tokenizer
-         self.attributes = attributes
-         self.max_token_len = max_token_len
-         self.sample = sample
-         self._prepare_data()
-
-     def _prepare_data(self):
-         data = pd.read_csv(self.data_path)
-         data["healthy"] = data.apply(healthy_filter,axis=1)
-         data["unhealthy"] = np.where(data['healthy']==1, 0, 1)
-         if self.sample is not None:
-             unhealthy = data.loc[data["healthy"] == 0]
-             healthy = data.loc[data["healthy"] ==1]
-             self.data = pd.concat([unhealthy, healthy.sample(self.sample, random_state=42)])
-         else:
-             self.data = data
-
-     def __len__(self):
-         return len(self.data)
-
-     def __getitem__(self,index):
-         item = self.data.iloc[index]
-         comment = str(item.comment_text)
-         attributes = torch.FloatTensor(item[self.attributes])
-         tokens = self.tokenizer.encode_plus(comment,
-                                             add_special_tokens=True,
-                                             return_tensors='pt',
-                                             truncation=True,
-                                             padding='max_length',
-                                             max_length=self.max_token_len,
-                                             return_attention_mask = True)
-         return {'input_ids': tokens.input_ids.flatten(), 'attention_mask': tokens.attention_mask.flatten(), 'labels': attributes}
-
-
- class Comments_Data_Module(pl.LightningDataModule):
-
-     def __init__(self, train_path, val_path, attributes, batch_size: int = 16, max_token_length: int = 128, model_name='roberta-base'):
-         super().__init__()
-         self.train_path = train_path
-         self.val_path = val_path
-         self.attributes = attributes
-         self.batch_size = batch_size
-         self.max_token_length = max_token_length
-         self.model_name = model_name
-         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-     def setup(self, stage = None):
-         if stage in (None, "fit"):
-             self.train_dataset = Comments_Dataset(self.train_path, attributes=self.attributes, tokenizer=self.tokenizer)
-             self.val_dataset = Comments_Dataset(self.val_path, attributes=self.attributes, tokenizer=self.tokenizer, sample=None)
-         if stage == 'predict':
-             self.val_dataset = Comments_Dataset(self.val_path, attributes=self.attributes, tokenizer=self.tokenizer, sample=None)
-
-     def train_dataloader(self):
-         return DataLoader(self.train_dataset, batch_size = self.batch_size, num_workers=4, shuffle=True)
-
-     def val_dataloader(self):
-         return DataLoader(self.val_dataset, batch_size = self.batch_size, num_workers=4, shuffle=False)
-
-     def predict_dataloader(self):
-         return DataLoader(self.val_dataset, batch_size = self.batch_size, num_workers=4, shuffle=False)
-
- comments_data_module = Comments_Data_Module(train_path, test_dataset_path, attributes=attributes)
- comments_data_module.setup()
- comments_data_module.train_dataloader()
-
- class Comment_Classifier(pl.LightningModule):
-     def __init__(self, config: dict):
-         super().__init__()
-         self.config = config
-         self.pretrained_model = AutoModel.from_pretrained(config['model_name'], return_dict = True)
-         self.hidden = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.pretrained_model.config.hidden_size)
-         self.classifier = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.config['n_labels'])
-         torch.nn.init.xavier_uniform_(self.classifier.weight)
-         self.loss_func = nn.CrossEntropyLoss()
-         self.dropout = nn.Dropout()
-
-     def forward(self, input_ids, attention_mask, labels=None):
-         output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask)
-         pooled_output = torch.mean(output.last_hidden_state, 1)
-         pooled_output = self.dropout(pooled_output)
-         pooled_output = self.hidden(pooled_output)
-         pooled_output = F.relu(pooled_output)
-         pooled_output = self.dropout(pooled_output)
-         logits = self.classifier(pooled_output)
-         # calculate loss
-         loss = 0
-         if labels is not None:
-             loss = self.loss_func(logits.view(-1, self.config['n_labels']), labels.view(-1, self.config['n_labels']))
-         return loss, logits
-
-     def training_step(self, batch, batch_index):
-         loss, outputs = self(**batch)
-         self.log("train loss ", loss, prog_bar = True, logger=True)
-         return {"loss":loss, "predictions":outputs, "labels": batch["labels"]}
-
-     def validation_step(self, batch, batch_index):
-         loss, outputs = self(**batch)
-         self.log("validation loss ", loss, prog_bar = True, logger=True)
-         return {"val_loss": loss, "predictions":outputs, "labels": batch["labels"]}
-
-     def predict_step(self, batch, batch_index):
-         loss, outputs = self(**batch)
-         return outputs
-
-     def configure_optimizers(self):
-         optimizer = AdamW(self.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'])
-         total_steps = self.config['train_size']/self.config['batch_size']
-         warmup_steps = math.floor(total_steps * self.config['warmup'])
-         scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
-         return [optimizer],[scheduler]
-
- config = {
-     'model_name': 'distilroberta-base',
-     'n_labels': len(attributes),
-     'batch_size': 128,
-     'lr': 1.5e-6,
-     'warmup': 0.2,
-     'train_size': len(comments_data_module.train_dataloader()),
-     'weight_decay': 0.001,
-     'n_epochs': 100
- }
-
-
- model_name = 'distilroberta-base'
- tokenizer = AutoTokenizer.from_pretrained(model_name)
-
- model = Comment_Classifier(config=config)
- model.load_state_dict(torch.load("model_state_dict.pt"))
- model.eval()
-
- def prepare_tokenized_review(raw_review):
-     review_text = BeautifulSoup(raw_review).get_text()
-     review_text = re.sub("[^a-zA-Z!?]"," ", review_text)
-     words = review_text.lower().split()
-
-     return " ".join(words)
-
- def get_encodings(text):
-     MAX_LEN=256
-     encodings = tokenizer.encode_plus(
-         text,
-         None,
-         add_special_tokens=True,
-         max_length=MAX_LEN,
          padding='max_length',
          truncation=True,
          return_attention_mask=True,
-         return_tensors='pt')
-     return encodings
-
- def run_inference(encoding):
-     with torch.no_grad():
-         input_ids = encoding['input_ids'].to(device, dtype=torch.long)
-         attention_mask = encoding['attention_mask'].to(device, dtype=torch.long)
-         output = model(input_ids, attention_mask)
-         final_output = torch.softmax(output[1][0],dim=0).cpu()
-         print(final_output.numpy().tolist())
-         return final_output.numpy().tolist()
-
-
- test_tweets = test_df["comment_text"].values
- #streamlit section
- models = ["distilroberta-base"]
- model_pointers = ["default: distilroberta-base"]
-
- st.write("1. Hit the button to view and see the analyis of a random tweet")
-
- with st.form(key="init_form"):
-     current_random_tweet = test_tweets[random.randint(0,len(test_tweets))]
-     current_random_tweet = prepare_tokenized_review(current_random_tweet)
-
-
-     choice = st.selectbox("Choose Model", model_pointers)
-
-
-     user_picked_model = models[model_pointers.index(choice)]
-     with st.spinner("Analyzing..."):
-         text_encoding = get_encodings(current_random_tweet)
-         result = run_inference(text_encoding)
-         df = pd.DataFrame({"Tweet":current_random_tweet}, index=[0])
-         df["Highest Toxicity Class"] = attributes[result.index(max(result))]
-         df["Sentiment Score"] = max(result)
-         st.table(df)
-     next_tweet = st.form_submit_button("Next Tweet")
-
-     if next_tweet:
-         with st.spinner("Analyzing..."):
-             st.write("")

  import streamlit as st
  import pandas as pd
+ import transformers
  import torch

+ # Load the pre-trained BERT model and tokenizer
+ tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-uncased')
+ model = transformers.BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=6)

+ # Set up the Streamlit app
+ st.title('Toxicity Classification App')

+ # Create a text input for the user to enter their text
+ text_input = st.text_input('Enter text to classify')

+ # Create a button to run the classification
+ if st.button('Classify'):
+     # Tokenize the text and convert to input IDs
+     encoded_text = tokenizer.encode_plus(
+         text_input,
+         max_length=512,
          padding='max_length',
          truncation=True,
+         add_special_tokens=True,
          return_attention_mask=True,
+         return_tensors='pt'
+     )
+
+     # Run the text through the model
+     with torch.no_grad():
+         output = model(encoded_text['input_ids'], encoded_text['attention_mask'])
+         probabilities = torch.nn.functional.softmax(output[0], dim=1).tolist()[0]
+
+     # Display the classification results
+     st.write('Toxic:', probabilities[0])
+     st.write('Severe Toxic:', probabilities[1])
+     st.write('Obscene:', probabilities[2])
+     st.write('Threat:', probabilities[3])
+     st.write('Insult:', probabilities[4])
+     st.write('Identity Hate:', probabilities[5])
+
+     # Create a DataFrame to store the classification results
+     results_df = pd.DataFrame({
+         'Text': [text_input],
+         'Toxic': [probabilities[0]],
+         'Severe Toxic': [probabilities[1]],
+         'Obscene': [probabilities[2]],
+         'Threat': [probabilities[3]],
+         'Insult': [probabilities[4]],
+         'Identity Hate': [probabilities[5]]
+     })
+
+     # Append the classification results to the persistent DataFrame
+     if 'results' not in st.session_state:
+         st.session_state['results'] = pd.DataFrame(columns=results_df.columns)
+     # DataFrame.append was removed in pandas 2.0; concatenate instead
+     st.session_state['results'] = pd.concat([st.session_state['results'], results_df], ignore_index=True)
+
+     # Display the persistent DataFrame
+     st.write('Classification Results:', st.session_state.get('results', pd.DataFrame()))
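Note that `bert-base-uncased` ships without a toxicity head: passing num_labels=6 to from_pretrained attaches a freshly initialized classification layer, so the probabilities above are effectively random until the model is fine-tuned on the six Jigsaw labels. A minimal inference sketch with a checkpoint that is already fine-tuned (assuming `unitary/toxic-bert`, a public model trained on the same six labels; any equivalent checkpoint would do):

# Sketch only: 'unitary/toxic-bert' is an assumed fine-tuned checkpoint,
# not part of this commit.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained('unitary/toxic-bert')
model = AutoModelForSequenceClassification.from_pretrained('unitary/toxic-bert')
model.eval()

encoded = tokenizer('Enter text to classify', return_tensors='pt',
                    truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**encoded).logits
# Jigsaw toxicity is multi-label, so score each label independently with a
# sigmoid rather than a softmax across labels.
scores = torch.sigmoid(logits)[0]
for idx, label in model.config.id2label.items():
    print(label, float(scores[idx]))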
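On the results table: Streamlit reruns the whole script on every widget event, so the per-click DataFrame only survives because it is parked in st.session_state, and pd.concat (rather than the removed DataFrame.append) is how it grows. A standalone sketch of that accumulate-and-display pattern, with illustrative column names:

import pandas as pd
import streamlit as st

# One DataFrame that outlives reruns: session_state persists across
# widget events within a browser session.
if 'results' not in st.session_state:
    st.session_state['results'] = pd.DataFrame(columns=['Text', 'Toxic'])

if st.button('Add row'):
    new_row = pd.DataFrame({'Text': ['example comment'], 'Toxic': [0.97]})
    # pd.concat replaces the DataFrame.append method removed in pandas 2.0.
    st.session_state['results'] = pd.concat(
        [st.session_state['results'], new_row], ignore_index=True)

st.write('Classification Results:', st.session_state['results'])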