Prajjwal888 committed
Commit f552345
1 Parent(s): 1f14dbf

Add application file

Files changed (2)
  1. app.py +92 -0
  2. requirements.txt +0 -0
app.py ADDED
@@ -0,0 +1,92 @@
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ import torch
+ import gradio as gr
+ import re
+
+ model_path = "prajjwal888/Llama-2-7b-chat-question-generation"
+
+ # Load the fine-tuned Llama-2 chat model in half precision; device_map="auto"
+ # lets Accelerate place the weights on the available GPU(s) or CPU.
+ model = AutoModelForCausalLM.from_pretrained(
+     model_path,
+     torch_dtype=torch.float16,
+     device_map="auto"
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+ def parse_generated_text(text: str) -> dict:
+     """Split a raw completion into its 'Question:' and 'Hint:' parts."""
+     # Drop leftover [INST]/[/INST] markers and collapse a doubled "Question:" label.
+     clean_text = re.sub(r"\[/?INST\]", "", text)
+     clean_text = re.sub(r"Question:\s*Question:", "Question:", clean_text)
+     clean_text = clean_text.strip()
+
+     match = re.search(r"Question:\s*(.*?)(?:\nHint:|Hint:)(.*)", clean_text, re.DOTALL)
+
+     if match:
+         question = match.group(1).strip().strip('"').replace("Question:", "").strip()
+         hint = match.group(2).strip().strip('"')
+     else:
+         # The model ignored the requested format; treat the whole text as the question.
+         question = clean_text.strip()
+         hint = "No hint available"
+
+     return {
+         "question": question,
+         "hint": hint
+     }
+
+ def generate_questions(topic, difficulty, types, count):
+     print("Received input:", topic, difficulty, types, count)
+
+     try:
+         # Rebuilding the pipeline on every request is wasteful; it could be
+         # created once at module level alongside the model.
+         pipe = pipeline(
+             task="text-generation",
+             model=model,
+             tokenizer=tokenizer,
+             # device=0,
+             max_new_tokens=200,  # cap the completion only, not prompt + completion
+             temperature=0.7,
+             top_p=0.9,
+             do_sample=True
+         )
+
+         questions = []
+
+         for _ in range(int(count)):  # the slider value may arrive as a float
+             for q_type in types:
+                 prompt = (
+                     f"Generate a {difficulty} difficulty {q_type} question about {topic}.\n"
+                     "Format strictly as follows:\n"
+                     "Question: <your question here>\n"
+                     "Hint: <your hint here or 'No hint available'>"
+                 )
+
+                 # Wrap the request in the Llama-2 chat instruction template.
+                 formatted_prompt = f"<s>[INST] {prompt} [/INST]"
+                 print("Prompt:", formatted_prompt)
+
+                 result = pipe(formatted_prompt)
+                 print("Raw Output:", result)
+
+                 # The pipeline echoes the prompt, so strip it before parsing.
+                 generated_text = result[0]['generated_text'].replace(formatted_prompt, "").strip()
+                 parsed = parse_generated_text(generated_text)
+
+                 questions.append(f"**Type**: {q_type}\n\n**Question**: {parsed['question']}\n\n**Hint**: {parsed['hint']}\n\n---")
+
+         return "\n\n".join(questions)
+
+     except Exception as e:
+         print("Error:", e)
+         return f"Something went wrong: {e}"
+
+ iface = gr.Interface(
+     fn=generate_questions,
+     inputs=[
+         gr.Textbox(label="Topic"),
+         gr.Dropdown(choices=["easy", "medium", "hard"], label="Difficulty", value="medium"),
+         gr.CheckboxGroup(choices=["Conceptual", "Numerical", "Application"], label="Question Types"),
+         gr.Slider(minimum=1, maximum=5, step=1, value=2, label="Number of Questions per Type")
+     ],
+     outputs=gr.Markdown(label="Generated Questions"),
+     title="AI Question Generator",
+     description="Enter a topic, select difficulty and question types to generate AI-powered questions."
+ )
+
+ iface.queue()
+ iface.launch()
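
As a quick sanity check of the parsing logic above, here is how parse_generated_text handles a well-formatted completion (the sample string is hypothetical, not actual model output):

    sample = 'Question: "What is the time complexity of binary search?"\nHint: Think about how the search space halves at each step.'
    print(parse_generated_text(sample))
    # {'question': 'What is the time complexity of binary search?',
    #  'hint': 'Think about how the search space halves at each step.'}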
requirements.txt ADDED
Binary file (2.38 kB).
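
requirements.txt is stored as a binary blob here, so its exact pins are not visible in this diff. Assuming it covers the imports used by app.py (transformers, torch, gradio) plus accelerate for device_map="auto", the app should also run locally with:

    pip install -r requirements.txt
    python app.py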