Update utils/quiz_offline.py
utils/quiz_offline.py  +15 -16
@@ -1,18 +1,17 @@
-    prompt = f"generate question: {highlighted}"
-    input_ids = tokenizer.encode(prompt, return_tensors="pt")
-    output = model.generate(input_ids, max_length=128)
-    question = tokenizer.decode(output[0], skip_special_tokens=True)
-    return question
-
-# Example
-context = "The mitochondria is responsible for producing energy in the cell."
-answer = "mitochondria"
-print(generate_question(context, answer))
+def generate_mcqs(text, num_questions=1):
+    input_text = f"generate question: {text}"
+    input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
+
+    questions = []
+    for _ in range(num_questions):
+        output = model.generate(
+            input_ids=input_ids,
+            max_length=128,
+            num_return_sequences=1,
+            temperature=0.7
+        )
+        decoded = tokenizer.decode(output[0], skip_special_tokens=True)
+        questions.append(decoded.strip())
+
+    # Join all questions into one string for Gradio output
+    return "\n".join(f"{i+1}. {q}" for i, q in enumerate(questions))
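The hunk assumes module-level `tokenizer` and `model` objects that it never shows. Below is a minimal runnable sketch of that surrounding context; the `MODEL_NAME` value is an assumption (any T5-style question-generation checkpoint would fit), not something taken from this repo. One caveat worth noting: in Hugging Face `transformers`, `temperature` only takes effect when `do_sample=True`, so as committed the loop re-runs greedy decoding and returns the identical question `num_questions` times. The sketch enables sampling so the loop actually varies.

# Minimal runnable context for generate_mcqs(); MODEL_NAME is an
# assumption -- the diff never shows how tokenizer/model are created.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_NAME = "valhalla/t5-base-qg-hl"  # hypothetical checkpoint choice
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

def generate_mcqs(text, num_questions=1):
    # T5 QG checkpoints expect a task prefix before the passage
    input_text = f"generate question: {text}"
    input_ids = tokenizer.encode(
        input_text, return_tensors="pt", max_length=512, truncation=True
    )

    questions = []
    for _ in range(num_questions):
        output = model.generate(
            input_ids=input_ids,
            max_length=128,
            num_return_sequences=1,
            do_sample=True,   # without this, temperature is ignored and every
            temperature=0.7,  # iteration yields the same greedy question
        )
        questions.append(tokenizer.decode(output[0], skip_special_tokens=True).strip())

    # Join all questions into one numbered string for Gradio output
    return "\n".join(f"{i+1}. {q}" for i, q in enumerate(questions))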
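The closing comment says the joined string feeds a Gradio output, but the app wiring is not part of this diff. A hypothetical hookup could look like the following; the component labels and slider range are invented for illustration.

import gradio as gr

# Hypothetical wiring -- the repo's actual Gradio app is not shown in this diff.
demo = gr.Interface(
    fn=lambda text, n: generate_mcqs(text, int(n)),  # slider values arrive as numbers
    inputs=[
        gr.Textbox(label="Source text", lines=6),
        gr.Slider(1, 5, value=1, step=1, label="Number of questions"),
    ],
    outputs=gr.Textbox(label="Generated questions"),
)

demo.launch()

Returning one pre-numbered string rather than a list keeps the output compatible with a single Textbox component, which is presumably why the commit joins the questions before returning.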