from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch


def load_model():
    """Load the CodeT5 tokenizer and model, moving the model to GPU if available."""
    model_name = "Salesforce/codet5-base"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return tokenizer, model, device


def generate_explanation(prompt, tokenizer, model, device):
    """Generate a sampled continuation for the given prompt."""
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)
    output = model.generate(
        **inputs,
        decoder_start_token_id=tokenizer.pad_token_id,
        max_new_tokens=256,
        do_sample=True,  # temperature only takes effect when sampling is enabled
        temperature=0.7,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
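

# A minimal usage sketch, not part of the original file: it assumes the module is
# run directly and uses a hypothetical example prompt purely for illustration.
if __name__ == "__main__":
    tokenizer, model, device = load_model()
    prompt = "summarize: def add(a, b): return a + b"  # illustrative prompt, not from the source
    print(generate_explanation(prompt, tokenizer, model, device))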