# llm_test/src/brain.py
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model once at import time.
# Note: the original code used AutoModel, which returns the bare encoder without a
# language-modelling head and therefore has no .generate() method. AutoModelForCausalLM
# is assumed here so that generation is available with this checkpoint.
tokenizer = AutoTokenizer.from_pretrained('juridics/bertimbaulaw-base-portuguese-sts-scale')
model = AutoModelForCausalLM.from_pretrained('juridics/bertimbaulaw-base-portuguese-sts-scale')

def generate_answers(query):
    # Tokenize the query and allow up to 100 generated tokens beyond the prompt.
    input_ids = tokenizer.encode(query, return_tensors='pt')
    max_length = input_ids.shape[1] + 100
    generated_ids = model.generate(input_ids, max_length=max_length)
    # Decode the generated ids back into text, dropping special tokens.
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return generated_text
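
# Minimal usage sketch (assumption: this file is run directly; the query string
# below is only an illustrative example and is not part of the original file).
if __name__ == '__main__':
    answer = generate_answers('Qual é o prazo para contestação no processo civil?')
    print(answer)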