fahadkhan93 commited on
Commit
07eb8d8
·
verified ·
1 Parent(s): 506372a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -0
app.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
+
4
# --- Model setup ------------------------------------------------------
# Fine-tuned electrical-engineering checkpoint based on Phi-2.
model_name = "STEM-AI-mtl/phi-2-electrical-engineering"

# Tokenizer and weights are pulled from the Hugging Face hub on first run.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Shared text-generation pipeline; capped at 256 fresh tokens per call.
gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
)
9
+
10
def generate_answer(question):
    """Generate an answer to an electronics-engineering question.

    Args:
        question: Free-form question text from the UI textbox.

    Returns:
        The model's answer with the prompt scaffolding stripped, or a
        short hint string when the question is empty.
    """
    # Guard: don't spend a generation pass on an empty/whitespace prompt.
    if not question or not question.strip():
        return "Please enter a question."

    prompt = f"Answer this electronics engineering question:\n{question}\nAnswer:"
    response = gen_pipeline(prompt, do_sample=True, temperature=0.7)[0]["generated_text"]

    # The pipeline echoes the prompt in generated_text. Strip it by exact
    # prefix when possible; splitting on "Answer:" and taking the last
    # piece would silently drop content if the model itself emits another
    # "Answer:" marker inside its reply.
    if response.startswith(prompt):
        answer = response[len(prompt):]
    else:
        answer = response.split("Answer:")[-1]
    return answer.strip()
16
+
17
# --- Gradio interface -------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Ask Me Electronics Engineering Questions")

    # Input box for the user's question and an output box for the model's
    # reply, wired together through a single button click.
    question_box = gr.Textbox(label="Your Question", placeholder="e.g. What is a BJT?")
    answer_box = gr.Textbox(label="AI Answer", lines=4)
    ask_button = gr.Button("Generate Answer")
    ask_button.click(fn=generate_answer, inputs=question_box, outputs=answer_box)

if __name__ == "__main__":
    # Launch the app only when run as a script (not on import).
    demo.launch()