Update app.py
Browse files
app.py
CHANGED
@@ -5,16 +5,28 @@ from transformers import T5Tokenizer, T5ForConditionalGeneration
|
|
5 |
from sentence_transformers import SentenceTransformer, util
|
6 |
|
7 |
# ------------------------------
|
8 |
-
#
|
9 |
# ------------------------------
|
10 |
model_qg = T5ForConditionalGeneration.from_pretrained("t5-base")
|
11 |
tokenizer_qg = T5Tokenizer.from_pretrained("t5-base")
|
|
|
12 |
|
|
|
|
|
|
|
13 |
def generate_mcqs(text, num_questions=3):
|
14 |
-
input_text = f"generate
|
|
|
15 |
input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
# ------------------------------
|
20 |
# Weakness Analyzer
|
@@ -25,7 +37,7 @@ def analyze_weakness(csv_file):
|
|
25 |
return summary.to_string()
|
26 |
|
27 |
# ------------------------------
|
28 |
-
# Teaching Assistant
|
29 |
# ------------------------------
|
30 |
def chatbot_response(message, history):
    """Stub chat handler for the Teaching Assistant tab.

    Ignores both the incoming message and the chat history and returns a
    fixed placeholder string until a real LLM backend is integrated.
    """
    placeholder = "This is a placeholder response for now. (LLM not integrated)"
    return placeholder
|
@@ -75,8 +87,6 @@ def translate_text(text, target_lang):
|
|
75 |
# ------------------------------
|
76 |
# Plagiarism Checker
|
77 |
# ------------------------------
|
78 |
-
model_plag = SentenceTransformer('all-MiniLM-L6-v2')
|
79 |
-
|
80 |
def check_plagiarism(text1, text2):
|
81 |
emb1 = model_plag.encode(text1, convert_to_tensor=True)
|
82 |
emb2 = model_plag.encode(text2, convert_to_tensor=True)
|
@@ -93,7 +103,7 @@ with gr.Blocks() as demo:
|
|
93 |
quiz_text = gr.Textbox(label="Content", lines=5)
|
94 |
quiz_slider = gr.Slider(1, 10, value=3, label="Number of Questions")
|
95 |
quiz_btn = gr.Button("Generate Quiz")
|
96 |
-
quiz_out = gr.Textbox(label="Generated Quiz")
|
97 |
quiz_btn.click(fn=generate_mcqs, inputs=[quiz_text, quiz_slider], outputs=quiz_out)
|
98 |
|
99 |
with gr.Tab("π Weakness Analyzer"):
|
|
|
5 |
from sentence_transformers import SentenceTransformer, util
|
6 |
|
7 |
# ------------------------------
# Load Models
# ------------------------------
# T5 model + tokenizer used by the quiz generator.
model_qg = T5ForConditionalGeneration.from_pretrained("t5-base")
tokenizer_qg = T5Tokenizer.from_pretrained("t5-base")
# Sentence-embedding model used by the plagiarism checker.
model_plag = SentenceTransformer('all-MiniLM-L6-v2')
|
13 |
|
14 |
+
# ------------------------------
|
15 |
+
# Offline Quiz Generator
|
16 |
+
# ------------------------------
|
17 |
def generate_mcqs(text, num_questions=3):
    """Generate quiz questions from *text* with the T5 question-generation model.

    Args:
        text: Source passage to generate questions from.
        num_questions: Number of questions to produce (default 3). Gradio
            sliders may deliver this as a float, so it is coerced to int.

    Returns:
        A newline-joined, numbered string of generated questions.
    """
    # gr.Slider can pass a float; range() requires an int.
    num_questions = int(num_questions)

    input_text = f"generate question: {text}"
    input_ids = tokenizer_qg.encode(
        input_text, return_tensors="pt", max_length=512, truncation=True
    )

    questions = []
    for _ in range(num_questions):
        # do_sample=True is required for temperature to take effect; without it
        # generation is deterministic and every iteration decodes the identical
        # question, defeating the purpose of the loop.
        outputs = model_qg.generate(
            input_ids=input_ids,
            max_length=100,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.7,
        )
        decoded = tokenizer_qg.decode(outputs[0], skip_special_tokens=True)
        questions.append(decoded.strip())

    return "\n".join(f"{i + 1}. {q}" for i, q in enumerate(questions))
|
30 |
|
31 |
# ------------------------------
|
32 |
# Weakness Analyzer
|
|
|
37 |
return summary.to_string()
|
38 |
|
39 |
# ------------------------------
|
40 |
+
# Teaching Assistant
|
41 |
# ------------------------------
|
42 |
def chatbot_response(message, history):
|
43 |
return "This is a placeholder response for now. (LLM not integrated)"
|
|
|
87 |
# ------------------------------
|
88 |
# Plagiarism Checker
|
89 |
# ------------------------------
|
|
|
|
|
90 |
def check_plagiarism(text1, text2):
|
91 |
emb1 = model_plag.encode(text1, convert_to_tensor=True)
|
92 |
emb2 = model_plag.encode(text2, convert_to_tensor=True)
|
|
|
103 |
quiz_text = gr.Textbox(label="Content", lines=5)
|
104 |
quiz_slider = gr.Slider(1, 10, value=3, label="Number of Questions")
|
105 |
quiz_btn = gr.Button("Generate Quiz")
|
106 |
+
quiz_out = gr.Textbox(label="Generated Quiz", lines=10)
|
107 |
quiz_btn.click(fn=generate_mcqs, inputs=[quiz_text, quiz_slider], outputs=quiz_out)
|
108 |
|
109 |
with gr.Tab("π Weakness Analyzer"):
|