from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

def load_hf_model(model_name, device="cpu"):
    """Load a Hugging Face causal LM and wrap it in a text-generation pipeline."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # pipeline expects a device index: 0 for the first GPU, -1 for CPU.
    return pipeline("text-generation", model=model, tokenizer=tokenizer,
                    device=0 if device == "cuda" else -1)

def generate_answer(text_gen, question, context):
    """Build a context/question prompt and return only the model's answer."""
    prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer:"
    result = text_gen(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    # The pipeline echoes the prompt, so keep only the text after the final "Answer:".
    return result[0]["generated_text"].split("Answer:")[-1].strip()
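
# Minimal usage sketch. The model name "distilgpt2" and the context/question
# strings below are illustrative placeholders, not part of the original file.
if __name__ == "__main__":
    text_gen = load_hf_model("distilgpt2", device="cpu")
    answer = generate_answer(
        text_gen,
        question="What does load_hf_model return?",
        context="load_hf_model wraps a causal LM in a text-generation pipeline.",
    )
    print(answer)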