# Gradio Space: electronics-engineering Q&A demo built on a fine-tuned phi-2 model.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the fine-tuned EE LLM (phi-2 fine-tuned for electrical engineering).
model_name = "STEM-AI-mtl/phi-2-electrical-engineering"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Text-generation pipeline; max_new_tokens caps the length of each answer.
gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
)
# Define function to generate answer
def generate_answer(question):
    """Generate an answer to an electronics-engineering question.

    Args:
        question: The user's question as plain text.

    Returns:
        The model's answer text, with the prompt scaffolding stripped.
    """
    prompt = f"Answer this electronics engineering question:\n{question}\nAnswer:"
    # Sample with moderate temperature for variety; take the first candidate.
    response = gen_pipeline(prompt, do_sample=True, temperature=0.7)[0]["generated_text"]
    # The pipeline echoes the prompt, so keep only the text after the last "Answer:".
    answer = response.split("Answer:")[-1].strip()
    return answer
# Gradio UI: one question box, one answer box, and a button wired to the model.
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Ask Me Electronics Engineering Questions")
    question = gr.Textbox(label="Your Question", placeholder="e.g. What is a BJT?")
    output = gr.Textbox(label="AI Answer", lines=4)
    button = gr.Button("Generate Answer")
    # Clicking the button feeds the question to generate_answer and shows the result.
    button.click(generate_answer, inputs=question, outputs=output)

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()