Gregoryjr committed
Commit 92ac326 (unverified) · Parent: 2fda257

Update app.py

Files changed (1): app.py (+4 -14)
app.py CHANGED
@@ -31,29 +31,19 @@ if con:
     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
     model = AutoModelForSequenceClassification.from_pretrained("Greys/milestonemodel")
     my_list = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
-    def classify_sentence(text):
-        inputs = tokenizer(text, return_tensors="pt")
-        outputs = model(**inputs)
-        probs = outputs.logits.softmax(dim=1)
-        return probs.detach().numpy()[0]
-    probs = classify_sentence(text)
     def find_largest_number(numbers):
-        if len(numbers) == 0:
+        if not numbers:
             print("List is empty.")
-            return None, None
-
+            return None
         max_num = numbers[0]
         max_index = 0
         for i in range(1, len(numbers)):
             if numbers[i] > max_num:
                 max_num = numbers[i]
                 max_index = i
-
-        return max_index
-
-
-    print(probs)
 
+        return max_index
+    print(probs)
     index = find_largest_number(probs)
     st.write(my_list[index])
     #id,toxic,severe_toxic,obscene,threat,insult,identity_hate
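
For reference, the deleted classify_sentence helper followed the standard Transformers sequence-classification pattern: tokenize the text, run a forward pass, and softmax the logits into per-class probabilities. A minimal standalone sketch of that pattern, reusing the checkpoint and label list from the diff (whether "Greys/milestonemodel" loads depends on that repo being accessible, and the input string is hypothetical):

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("Greys/milestonemodel")
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']

def classify_sentence(text):
    # Tokenize the input and run a forward pass without tracking gradients.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Softmax the logits into probabilities; return a 1-D NumPy array.
    return outputs.logits.softmax(dim=1)[0].numpy()

probs = classify_sentence("example comment")  # hypothetical input
print(labels[probs.argmax()])                 # argmax replaces find_largest_number

Since probs is a NumPy array, probs.argmax() does what find_largest_number hand-rolls. Note also that the new emptiness check, if not numbers:, raises a ValueError ("the truth value of an array with more than one element is ambiguous") on a multi-element NumPy array, whereas the old len(numbers) == 0 did not; and with classify_sentence deleted, probs is only defined if it is assigned somewhere earlier in app.py than the lines shown here.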