Kvikontent committed on
Commit
4d54af4
·
1 Parent(s): c1e316f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -20
app.py CHANGED
@@ -1,24 +1,15 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, FalconForSequenceClassification
3
- import torch
4
 
5
- # Load pre-trained FALCON model and tokenizer
6
- tokenizer = AutoTokenizer.from_pretrained("rocketknight1/falcon-rw-1b")
7
- model = FalconForSequenceClassification.from_pretrained("rocketknight1/falcon-rw-1b", num_labels=8)
8
 
9
- def predict(text):
10
- # Encode input text into IDs and mask
11
- inputs = tokenizer(text, return_tensors="pt")
12
-
13
- # Run inference on encoded input
14
- outputs = model(**inputs)
15
-
16
- # Get last hidden state (i.e., output of final layer)
17
- last_hidden_state = outputs.last_hidden_state[:, 0, :]
18
-
19
- # Return predicted label
20
- return torch.argmax(last_hidden_state).item() + 1
21
 
22
- iface = gr.Interface(fn=predict, inputs="textbox", outputs="label", title="FALCON Chatbot")
23
- if __name__ == "__main__":
24
- iface.launch()
 
import gradio as gr
import numpy as np
import torch
from transformers import BertTokenizer, BertForSequenceClassification
 
3
 
4
+ tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
5
+ model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=8)
 
6
 
7
+ def classify(text):
8
+ inputs = tokenizer(text, return_tensors='pt')
9
+ outputs = model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
10
+ logits = outputs[0].logits
11
+ probabilities = torch.softmax(logits, dim=-1).tolist()
12
+ predicted_label = np.argmax(probabilities)
13
+ return {'Label': predicted_label}
 
 
 
 
 
14
 
15
+ gr.Interface(fn=classify, inputs="text", outputs="json").launch()