# Streamlit app: explains pasted code using CodeT5 from Hugging Face.
# Run with: streamlit run app.py
import streamlit as st
from transformers import AutoTokenizer, T5ForConditionalGeneration


# Cache the tokenizer and model so they are loaded only once per session.
@st.cache_resource
def load_model():
    # CodeT5 uses a RoBERTa-style BPE tokenizer, so AutoTokenizer (or
    # RobertaTokenizer) is required; T5Tokenizer cannot load this checkpoint.
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5-base")
    model = T5ForConditionalGeneration.from_pretrained("Salesforce/codet5-base")
    # Note: this base checkpoint is pretrained only; a summarization-fine-tuned
    # checkpoint such as Salesforce/codet5-base-multi-sum may give better explanations.
    return tokenizer, model


tokenizer, model = load_model()

st.title("🧠 Code Explainer (CodeT5)")
st.markdown("Paste code and get an explanation using the CodeT5 model from Hugging Face.")

code_input = st.text_area("Paste your code here:", height=200)

if st.button("Explain Code"):
    if code_input.strip() == "":
        st.warning("Please paste some code first.")
    else:
        with st.spinner("Generating explanation..."):
            # Prefix the snippet with the summarization task tag and truncate
            # to the model's maximum input length.
            input_text = f"summarize: {code_input.strip()}"
            input_ids = tokenizer.encode(
                input_text, return_tensors="pt", truncation=True, max_length=512
            )
            # Beam search tends to give more fluent summaries than greedy decoding.
            outputs = model.generate(
                input_ids, max_length=150, num_beams=4, early_stopping=True
            )
            summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
            st.success("Explanation:")
            st.write(summary)