# llm_test / src/brain.py
# (provenance from Hugging Face file viewer: author fschwartzer,
#  commit 151d72b verified, "Update src/brain.py", 566 bytes)
import torch

from transformers import BertTokenizer, BertForSequenceClassification
# Load the tokenizer and sequence-classification head once at import time.
# NOTE(review): this downloads/caches the checkpoint on first run — a
# network-dependent module-level side effect.
tokenizer = BertTokenizer.from_pretrained('juridics/bertimbaulaw-base-portuguese-sts-scale')
model = BertForSequenceClassification.from_pretrained('juridics/bertimbaulaw-base-portuguese-sts-scale')
def generate_answers(query):
    """Classify *query* with the BERT model and return the predicted label.

    Parameters
    ----------
    query : str
        Input text to classify (a single query string).

    Returns
    -------
    str
        One of 'ds', 'real', 'Group'.
    """
    inputs = tokenizer(query, return_tensors="pt", padding=True, truncation=True)
    # Inference only — disable autograd to avoid building a gradient graph.
    with torch.no_grad():
        outputs = model(**inputs)
    # argmax over the class dimension; .item() converts the single-element
    # tensor to a plain int (indexing a Python list with a tensor only
    # works by accident for batch size 1).
    prediction = torch.argmax(outputs.logits, dim=1).item()
    # NOTE(review): label order must match the model's training label map —
    # TODO confirm against the checkpoint's id2label config.
    labels = ['ds', 'real', 'Group']
    return labels[prediction]