Update app.py
app.py
CHANGED
@@ -1,33 +1,26 @@
 import gradio as gr
 import pandas as pd
 import torch
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM,
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 from sentence_transformers import SentenceTransformer, util

 # ------------------------------
 # Load Models
 # ------------------------------
-
-
-
-quiz_model = AutoModelForSeq2SeqLM.from_pretrained(quiz_model_name)
-
-# For summarizer and fallback tasks
-default_model_name = "t5-base"
-tokenizer_qg = T5Tokenizer.from_pretrained(default_model_name)
-model_qg = T5ForConditionalGeneration.from_pretrained(default_model_name)
-
-# For plagiarism detection
+qg_model_name = "iarfmoose/t5-base-question-generator"
+tokenizer_qg = AutoTokenizer.from_pretrained(qg_model_name)
+model_qg = AutoModelForSeq2SeqLM.from_pretrained(qg_model_name)
 model_plag = SentenceTransformer('all-MiniLM-L6-v2')
+asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")

 # ------------------------------
-# Quiz Generator
+# Quiz Generator
 # ------------------------------
 def generate_mcqs(text, num_questions=3):
     input_text = f"generate questions: {text.strip()}"
-    input_ids =
+    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)

-    outputs =
+    outputs = model_qg.generate(
         input_ids=input_ids,
         max_length=256,
         num_return_sequences=num_questions,
@@ -36,7 +29,7 @@ def generate_mcqs(text, num_questions=3):
         top_p=0.95
     )

-    questions = [
+    questions = [tokenizer_qg.decode(out, skip_special_tokens=True).strip() for out in outputs]
     return "\n".join([f"{i+1}. {q}" for i, q in enumerate(questions)])

 # ------------------------------
@@ -54,10 +47,15 @@ def chatbot_response(message, history):
     return "This is a placeholder response for now. (LLM not integrated)"

 # ------------------------------
-# Speech Question Solver (
+# Speech Question Solver (NEW)
 # ------------------------------
-def speech_answer(
-
+def speech_answer(audio_file_path):
+    transcription = asr(audio_file_path)["text"]
+    input_text = f"generate questions: {transcription.strip()}"
+    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
+    outputs = model_qg.generate(input_ids, max_length=256, num_return_sequences=1)
+    response = tokenizer_qg.decode(outputs[0], skip_special_tokens=True)
+    return f"🗣️ Transcript: {transcription.strip()}\n\n💡 Answer: {response.strip()}"

 # ------------------------------
 # Summarizer
@@ -108,7 +106,7 @@ def check_plagiarism(text1, text2):
 # Gradio Interface
 # ------------------------------
 with gr.Blocks() as demo:
-    gr.Markdown("# π Smart LMS Suite (
+    gr.Markdown("# π Smart LMS Suite (Offline)")

     with gr.Tab("🧠 Quiz Generator"):
         quiz_text = gr.Textbox(label="π Input Content", lines=6, placeholder="Paste a paragraph here...")
@@ -128,7 +126,7 @@ with gr.Blocks() as demo:

     with gr.Tab("🎤 Speech Q Solver"):
         audio_in = gr.Audio(label="Upload Audio", type="filepath")
-        audio_btn = gr.Button("
+        audio_btn = gr.Button("Transcribe + Generate Answer")
         audio_out = gr.Textbox(label="Answer")
         audio_btn.click(fn=speech_answer, inputs=audio_in, outputs=audio_out)

@@ -165,6 +163,6 @@ with gr.Blocks() as demo:
         plag_btn.click(fn=check_plagiarism, inputs=[text1, text2], outputs=plag_out)

 # ------------------------------
-# Launch
+# Launch
 # ------------------------------
 demo.launch()
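For a quick local check of the new question-generation path outside Gradio, a minimal sketch along the following lines should work. It mirrors generate_mcqs from the diff; the sample paragraph is made up, and do_sample=True is an assumption (that line is not in the hunks shown, but top_p=0.95 and num_return_sequences > 1 only take effect when sampling or beam search is enabled):

# Standalone smoke test for the question-generation model that app.py now loads.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

qg_model_name = "iarfmoose/t5-base-question-generator"
tokenizer_qg = AutoTokenizer.from_pretrained(qg_model_name)
model_qg = AutoModelForSeq2SeqLM.from_pretrained(qg_model_name)

text = "Photosynthesis converts light energy into chemical energy in plants."  # made-up sample
input_ids = tokenizer_qg.encode(
    f"generate questions: {text}", return_tensors="pt", max_length=512, truncation=True
)
outputs = model_qg.generate(
    input_ids=input_ids,
    max_length=256,
    num_return_sequences=3,
    do_sample=True,  # assumed; needed for top_p and multiple sampled sequences
    top_p=0.95,
)
for i, out in enumerate(outputs, start=1):
    print(f"{i}. {tokenizer_qg.decode(out, skip_special_tokens=True).strip()}")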
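The new speech path can be checked the same way. This is a sketch under two assumptions: ffmpeg is installed, since the transformers ASR pipeline typically uses it to decode an audio file passed as a path, and sample.wav is a hypothetical clip:

# Transcribe a local file with the same Whisper pipeline app.py now loads.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")
result = asr("sample.wav")  # hypothetical path; any short spoken clip will do
print(result["text"])       # the pipeline returns a dict with a "text" key

With the models above already loaded, calling speech_answer("sample.wav") inside app.py should then return the combined transcript-and-answer string that the new Gradio button displays.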
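check_plagiarism itself sits outside the hunks shown here, so the snippet below is only an illustration of the usual sentence-transformers pattern such a check follows with the all-MiniLM-L6-v2 model the app keeps loading; the 0.8 threshold is an arbitrary assumption, not a value taken from app.py:

# Illustrative cosine-similarity check; not the actual check_plagiarism body.
from sentence_transformers import SentenceTransformer, util

model_plag = SentenceTransformer("all-MiniLM-L6-v2")

def similarity_score(text1, text2):
    emb1, emb2 = model_plag.encode([text1, text2], convert_to_tensor=True)
    return util.cos_sim(emb1, emb2).item()

score = similarity_score("Cells are the basic unit of life.",
                         "The basic unit of life is the cell.")
print(f"Similarity: {score:.2f} ({'flag for review' if score > 0.8 else 'looks original'})")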