import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
try:
    checkpoint = "Salesforce/codegen-350M-mono"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)
except Exception as e:
    st.error(f"Error loading model: {e}")
    st.stop()

# Function to generate code from a description
def generate_code(description):
    prompt = f"Generate Python code for the following task: {description}\n"
    inputs = tokenizer(prompt, return_tensors="pt")
    with st.spinner("Generating code..."):
        outputs = model.generate(
            **inputs,
            max_length=500,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,  # Avoid padding token warnings
        )
    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Extract only the code part after the prompt
    code = code[len(prompt):].strip()
    return code

# Streamlit UI
st.title("Code Generation Bot")
st.write("Enter a description to generate Python code!")

description = st.text_area("Description", placeholder="e.g., Write a function to add two numbers")

if st.button("Generate Code"):
    if description.strip():
        generated_code = generate_code(description)
        st.code(generated_code, language="python")
        st.info("Tip: Review the code for accuracy before using!")
    else:
        st.warning("Please enter a description!")