sathwikabhavaraju2005 committed on
Commit
649af31
·
verified ·
1 Parent(s): a99a306

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -10
app.py CHANGED
@@ -14,20 +14,23 @@ model_plag = SentenceTransformer('all-MiniLM-L6-v2')
14
  # ------------------------------
15
  # Offline Quiz Generator
16
  # ------------------------------
 
 
 
17
  def generate_mcqs(text, num_questions=3):
18
- input_text = f"generate question: {text}"
19
- print(input_text)
20
  input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
21
 
22
- questions = []
23
- for _ in range(num_questions):
24
- outputs = model_qg.generate(input_ids=input_ids, max_length=100, num_return_sequences=1, temperature=0.7)
25
- decoded = tokenizer_qg.decode(outputs[0], skip_special_tokens=True)
26
- print("decoded")
27
- print(decoded)
28
- questions.append(decoded.strip())
29
 
30
- return "\n".join([f"{i+1}. {q}" for i, q in enumerate(questions)])
 
31
 
32
  # ------------------------------
33
  # Weakness Analyzer
 
14
  # ------------------------------
15
  # Offline Quiz Generator
16
  # ------------------------------
17
# Offline question-generation model (T5 fine-tuned for QG), loaded once at import time.
model_qg = T5ForConditionalGeneration.from_pretrained("valhalla/t5-small-qg-hl")
tokenizer_qg = T5Tokenizer.from_pretrained("valhalla/t5-small-qg-hl")

def generate_mcqs(text, num_questions=3):
    """Generate quiz questions from *text* with the offline T5 QG model.

    Parameters
    ----------
    text : str
        Source passage to generate questions from.
    num_questions : int, optional
        How many questions to produce (default 3).

    Returns
    -------
    str
        Newline-joined, 1-indexed numbered list of generated questions.
    """
    prompt = f"generate questions: {text}"
    # Tokenize; truncate to the model's 512-token input limit.
    encoded = tokenizer_qg.encode(prompt, return_tensors="pt", max_length=512, truncation=True)

    # Beam search with one beam per requested question so generate() can
    # return num_questions distinct candidate sequences in a single call.
    generated = model_qg.generate(
        input_ids=encoded,
        max_length=128,
        num_return_sequences=num_questions,
        num_beams=num_questions,
        early_stopping=True,
    )

    numbered = []
    for idx, seq in enumerate(generated, start=1):
        question = tokenizer_qg.decode(seq, skip_special_tokens=True).strip()
        numbered.append(f"{idx}. {question}")
    return "\n".join(numbered)
34
 
35
  # ------------------------------
36
  # Weakness Analyzer