Kvikontent committed
Commit 242de2a · 1 Parent(s): d620412

Update app.py

Files changed (1)
  1. app.py +20 -18
app.py CHANGED
@@ -1,22 +1,24 @@
  import gradio as gr
- from transformers import AutoTokenizer, FalconModel
+ from transformers import AutoTokenizer, FalconForSequenceClassification
+ import torch
 
- tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
- model = FalconModel.from_pretrained("Rocketknight1/falcon-rw-1b")
+ # Load pre-trained FALCON model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained('rocketknight1/falcon-rw-1b')
+ model = FalconForSequenceClassification.from_pretrained('rocketknight1/falcon-rw-1b', num_labels=8)
 
- def chat(input_text):
-     inputs = tokenizer(input_text, return_tensors="pt")
-     outputs = model(**inputs)
-     last_hidden_states = outputs.last_hidden_state
-     response = tokenizer.decode(last_hidden_states.squeeze(), skip_special_tokens=True)
-     return response
+ def predict(text):
+     # Encode input text into IDs and mask
+     inputs = tokenizer(text, return_tensors='pt')
+ 
+     # Run inference on encoded input
+     outputs = model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
+ 
+     # Get last hidden state (i.e., output of final layer)
+     last_hidden_state = outputs.last_hidden_state[:, 0, :]
+ 
+     # Return predicted label
+     return torch.argmax(last_hidden_state).item() + 1
 
- interface = gr.Interface(
-     fn=chat,
-     inputs=gr.Textbox(label="User Question"),
-     outputs=gr.Textbox(label="Chatbot Response"),
-     title="Falcon-7b",
-     theme="dark"
- )
- 
- interface.launch()
+ iface = gr.Interface(fn=predict, inputs="textbox", outputs="label", title="Falcon-7b")
+ if __name__ == "__main__":
+     iface.launch()
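
Note on the updated code: FalconForSequenceClassification returns a SequenceClassifierOutputWithPast, which exposes its prediction scores as outputs.logits and has no last_hidden_state attribute, so the committed predict() is expected to fail at the line that reads outputs.last_hidden_state[:, 0, :]. Below is a minimal corrected sketch; it keeps the committed checkpoint, num_labels=8, and the 1-based label the commit returns, but the logits-based argmax is an assumption about the intended behaviour, not part of the commit.

import torch
from transformers import AutoTokenizer, FalconForSequenceClassification

# Same checkpoint and label count as the committed code. falcon-rw-1b is a base
# language model, so the classification head here is newly initialized.
tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
model = FalconForSequenceClassification.from_pretrained("Rocketknight1/falcon-rw-1b", num_labels=8)

def predict(text):
    # Tokenize the input and run a forward pass without tracking gradients.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Classification scores live in outputs.logits, shape [batch_size, num_labels].
    return torch.argmax(outputs.logits, dim=-1).item() + 1  # +1 mirrors the commit's 1-based labels

Because no fine-tuned classification head ships with falcon-rw-1b, from_pretrained will typically warn that the score weights are newly initialized, and the labels this Space returns are effectively random until that head is trained.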