# NOTE: the three lines originally here ("Spaces:" / "Sleeping" / "Sleeping")
# were scrape residue from a Hugging Face Spaces page header, not code;
# preserved as this comment so the module parses as valid Python.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration
def load_model():
    """Load the CodeT5 code-summarization checkpoint and its tokenizer.

    Returns:
        tuple: ``(tokenizer, model)`` where the model is in eval mode and
        moved to GPU when CUDA is available, otherwise CPU.
    """
    model_name = "Salesforce/codet5-base-multi-sum"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    model.eval()  # disable dropout — inference only
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    return tokenizer, model
def generate_explanation(code, tokenizer, model):
    """Generate a natural-language summary of a Python code snippet.

    Args:
        code: Source-code string to summarize.
        tokenizer: Tokenizer returned by ``load_model()``.
        model: ``T5ForConditionalGeneration`` returned by ``load_model()``.

    Returns:
        str: Decoded summary with special tokens stripped.
    """
    device = model.device
    # Prompt framing steers the summarizer toward an explanation style.
    input_text = f"summarize: This Python function does the following: {code}"
    input_ids = tokenizer.encode(
        input_text, return_tensors="pt", truncation=True
    ).to(device)
    # no_grad: inference only — skip autograd bookkeeping.
    # early_stopping only has an effect under beam search (num_beams > 1);
    # the original passed it with greedy decoding, where it was silently ignored.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_new_tokens=200,
            num_beams=4,
            early_stopping=True,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)