sathwikabhavaraju2005 committed on
Commit 48966e1 · verified · 1 Parent(s): a7abe70

Update utils/quiz_offline.py

Files changed (1)
  1. utils/quiz_offline.py +15 -16
utils/quiz_offline.py CHANGED
@@ -1,18 +1,17 @@
- from transformers import T5Tokenizer, T5ForConditionalGeneration
-
- model_name = "valhalla/t5-small-qg-hl"
- tokenizer = T5Tokenizer.from_pretrained(model_name)
- model = T5ForConditionalGeneration.from_pretrained(model_name)
-
- def generate_question(context, answer):
-     highlighted = context.replace(answer, f"<hl> {answer} <hl>")
-     prompt = f"generate question: {highlighted}"
-     input_ids = tokenizer.encode(prompt, return_tensors="pt")
-     output = model.generate(input_ids, max_length=128)
-     question = tokenizer.decode(output[0], skip_special_tokens=True)
-     return question
-
- # Example
- context = "The mitochondria is responsible for producing energy in the cell."
- answer = "mitochondria"
- print(generate_question(context, answer))
+ def generate_mcqs(text, num_questions=1):
+     input_text = f"generate question: {text}"
+     input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
+
+     questions = []
+     for _ in range(num_questions):
+         output = model.generate(
+             input_ids=input_ids,
+             max_length=128,
+             num_return_sequences=1,
+             temperature=0.7
+         )
+         decoded = tokenizer.decode(output[0], skip_special_tokens=True)
+         questions.append(decoded.strip())
+
+     # Join all questions into one string for Gradio output
+     return "\n".join(f"{i+1}. {q}" for i, q in enumerate(questions))
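Note on the new version: as the hunk above shows, this commit removes the transformers import and the module-level tokenizer/model loading, yet generate_mcqs still calls tokenizer.encode and model.generate, so the file no longer runs on its own unless those names are defined elsewhere in the app. A minimal sketch, assuming the intent is to keep the module self-contained: restoring these lines at the top of utils/quiz_offline.py (the checkpoint is the one the old version loaded; the example text is illustrative) would let the new function run as before.

    from transformers import T5Tokenizer, T5ForConditionalGeneration

    # Assumed setup: reload the checkpoint the removed lines used,
    # so generate_mcqs can resolve the module-level tokenizer and model.
    model_name = "valhalla/t5-small-qg-hl"
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    # Illustrative call to the function added in this commit.
    text = "The mitochondria is responsible for producing energy in the cell."
    print(generate_mcqs(text, num_questions=2))

Two further caveats: transformers ignores temperature unless sampling is enabled, so as written the loop decodes greedily and returns num_questions identical strings; passing do_sample=True alongside temperature=0.7 in model.generate would yield varied questions. And the checkpoint's -qg-hl name, together with the old generate_question, suggests the model expects the answer span wrapped in <hl> tokens, which the new prompt no longer provides.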