Hugging Face Spaces status: Sleeping
import torch

from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
def load_model():
    """Load the CodeT5 tokenizer and model, placing the model on GPU when available.

    Returns:
        tuple: ``(tokenizer, model, device)`` where ``device`` is the string
        ``"cuda"`` or ``"cpu"`` the model was moved to.
    """
    model_name = "Salesforce/codet5-base"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # BUG FIX: CodeT5 is a T5 encoder-decoder checkpoint, so it must be
    # loaded as a seq2seq LM. AutoModelForCausalLM raises a ValueError here
    # because T5Config is not registered in the causal-LM mapping.
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    model.eval()  # inference mode: disables dropout
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return tokenizer, model, device
def generate_explanation(prompt, tokenizer, model, device):
    """Generate a decoded text explanation for *prompt* with the given model.

    Args:
        prompt: Input text; truncated to the tokenizer's max length.
        tokenizer: Hugging Face tokenizer matching ``model``.
        model: A generation-capable Hugging Face model already on ``device``.
        device: Device string/object the input tensors are moved to
            (must match the model's device).

    Returns:
        str: The generated sequence decoded with special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)
    # Inference only — no_grad avoids building the autograd graph.
    with torch.no_grad():
        # BUG FIX: temperature has no effect (transformers emits a warning)
        # unless sampling is enabled, so set do_sample=True to honor it.
        output = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)