# app.py — offline quiz generation utilities (reconstructed from a commit diff paste)
@@ -14,20 +14,23 @@ model_plag = SentenceTransformer('all-MiniLM-L6-v2')
|
|
14 |
# ------------------------------
|
15 |
# Offline Quiz Generator
|
16 |
# ------------------------------
|
|
|
|
|
|
|
17 |
def generate_mcqs(text, num_questions=3):
|
18 |
-
input_text = f"generate
|
19 |
-
print(input_text)
|
20 |
input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
|
21 |
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
|
30 |
-
|
|
|
31 |
|
32 |
# ------------------------------
|
33 |
# Weakness Analyzer
|
|
|
# ------------------------------
# Offline Quiz Generator
# ------------------------------
# Load the question-generation tokenizer and model once at import time so
# generate_mcqs() can reuse them across calls instead of reloading per call.
tokenizer_qg = T5Tokenizer.from_pretrained("valhalla/t5-small-qg-hl")
model_qg = T5ForConditionalGeneration.from_pretrained("valhalla/t5-small-qg-hl")
def generate_mcqs(text, num_questions=3):
    """Generate quiz questions from *text* with the offline T5 QG model.

    Args:
        text: Source passage to generate questions from.
        num_questions: Number of questions to produce (default 3).

    Returns:
        A single string with one numbered question per line,
        e.g. "1. ...\n2. ...\n3. ...".
    """
    # "generate questions:" is the task prefix the
    # valhalla/t5-small-qg-hl checkpoint expects — TODO confirm against
    # the model card before changing it.
    input_text = f"generate questions: {text}"
    input_ids = tokenizer_qg.encode(
        input_text, return_tensors="pt", max_length=512, truncation=True
    )

    # Beam search: num_beams must be >= num_return_sequences, so keeping
    # them equal returns exactly num_questions candidate questions.
    outputs = model_qg.generate(
        input_ids=input_ids,
        max_length=128,
        num_return_sequences=num_questions,
        num_beams=num_questions,
        early_stopping=True,
    )

    decoded = [
        tokenizer_qg.decode(output, skip_special_tokens=True).strip()
        for output in outputs
    ]
    # Generator expression (no intermediate list) is the idiomatic form
    # for str.join.
    return "\n".join(f"{i + 1}. {q}" for i, q in enumerate(decoded))
# ------------------------------
# Weakness Analyzer