import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the fine-tuned electrical-engineering LLM once, at import time,
# so every request reuses the same weights and tokenizer.
model_name = "STEM-AI-mtl/phi-2-electrical-engineering"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Shared text-generation pipeline; output length is capped at 256 new tokens.
gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
)

# Define function to generate answer
# Define function to generate answer
def generate_answer(question: str) -> str:
    """Generate an answer to an electronics-engineering question.

    Args:
        question: The user's question text.

    Returns:
        The model's answer with the prompt scaffolding stripped off.
    """
    prompt = f"Answer this electronics engineering question:\n{question}\nAnswer:"
    response = gen_pipeline(prompt, do_sample=True, temperature=0.7)[0]["generated_text"]
    # The pipeline echoes the full prompt by default, so remove that exact
    # prefix instead of splitting on "Answer:" — splitting on the marker
    # truncates any answer that itself contains the string "Answer:".
    if response.startswith(prompt):
        completion = response[len(prompt):]
    else:
        # Fallback for pipelines configured not to echo the prompt.
        completion = response.split("Answer:")[-1]
    return completion.strip()

# Gradio UI
# Gradio UI: one input box, one output box, and a button wiring them
# together through generate_answer.
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Ask Me Electronics Engineering Questions")
    question_box = gr.Textbox(label="Your Question", placeholder="e.g. What is a BJT?")
    answer_box = gr.Textbox(label="AI Answer", lines=4)
    ask_button = gr.Button("Generate Answer")
    ask_button.click(generate_answer, inputs=question_box, outputs=answer_box)

# Launch the Gradio app only when the script is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()