Kvikontent committed on
Commit
c1e316f
·
1 Parent(s): 242de2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -3,15 +3,15 @@ from transformers import AutoTokenizer, FalconForSequenceClassification
import torch

# Pre-trained FALCON checkpoint used for both the tokenizer and the classifier.
_CHECKPOINT = 'rocketknight1/falcon-rw-1b'

tokenizer = AutoTokenizer.from_pretrained(_CHECKPOINT)
model = FalconForSequenceClassification.from_pretrained(_CHECKPOINT, num_labels=8)
def predict(text):
    """Classify *text* with the Falcon sequence classifier.

    Returns the predicted label as a 1-based index in 1..num_labels.
    """
    # Tokenize into input_ids / attention_mask tensors.
    inputs = tokenizer(text, return_tensors='pt')

    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model(inputs['input_ids'], attention_mask=inputs['attention_mask'])

    # Bug fix: FalconForSequenceClassification returns a classification
    # output whose prediction lives in `logits` (shape [1, num_labels]);
    # it has no `last_hidden_state` attribute, and an argmax over a hidden
    # state would index the hidden dimension, not a class.
    return torch.argmax(outputs.logits, dim=-1).item() + 1
# Gradio UI: plain text in, predicted label out.
iface = gr.Interface(
    fn=predict,
    inputs="textbox",
    outputs="label",
    title="Falcon-7b",
)

if __name__ == "__main__":
    iface.launch()
 
import torch

# Pre-trained FALCON checkpoint used for both the tokenizer and the classifier.
_CHECKPOINT = "rocketknight1/falcon-rw-1b"

tokenizer = AutoTokenizer.from_pretrained(_CHECKPOINT)
model = FalconForSequenceClassification.from_pretrained(_CHECKPOINT, num_labels=8)
def predict(text):
    """Classify *text* with the Falcon sequence classifier.

    Returns the predicted label as a 1-based index in 1..num_labels.
    """
    # Tokenize into input_ids / attention_mask tensors.
    inputs = tokenizer(text, return_tensors="pt")

    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model(**inputs)

    # Bug fix: FalconForSequenceClassification returns a classification
    # output whose prediction lives in `logits` (shape [1, num_labels]);
    # it has no `last_hidden_state` attribute, and an argmax over a hidden
    # state would index the hidden dimension, not a class.
    return torch.argmax(outputs.logits, dim=-1).item() + 1
# Gradio UI: plain text in, predicted label out.
iface = gr.Interface(
    fn=predict,
    inputs="textbox",
    outputs="label",
    title="FALCON Chatbot",
)

if __name__ == "__main__":
    iface.launch()