# smart-lms-suite / app.py
# NOTE: the following lines are Hugging Face Space page chrome captured with
# the file ("sathwikabhavaraju2005's picture / Update app.py / 1e691eb
# verified / raw / history blame / 5.85 kB") — kept as a comment so the
# module remains importable; they are not part of the program.
import gradio as gr
import pandas as pd
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration
from sentence_transformers import SentenceTransformer, util
# ------------------------------
# Offline Quiz Generator
# ------------------------------
# Shared t5-base checkpoint, loaded once at import time. It serves BOTH the
# quiz generator (generate_mcqs, "generate questions:" prefix) and the
# summarizer (summarize_text, "summarize:" prefix) below.
model_qg = T5ForConditionalGeneration.from_pretrained("t5-base")
tokenizer_qg = T5Tokenizer.from_pretrained("t5-base")
def generate_mcqs(text, num_questions=3):
    """Generate questions from *text* with the shared t5-base model.

    Args:
        text: Source content to generate questions from.
        num_questions: How many candidate questions to return (>= 1).

    Returns:
        A newline-separated, numbered string of generated questions.
    """
    input_text = f"generate questions: {text}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    # BUG FIX: num_questions was previously ignored (num_return_sequences was
    # hard-coded to 1). Beam search is required for num_return_sequences > 1
    # with deterministic decoding.
    n = max(1, int(num_questions))  # slider may hand us a float
    outputs = model_qg.generate(
        input_ids=input_ids,
        max_length=256,
        num_beams=max(n, 2),
        num_return_sequences=n,
    )
    questions = [tokenizer_qg.decode(seq, skip_special_tokens=True).strip() for seq in outputs]
    return "\n".join(f"{i}. {q}" for i, q in enumerate(questions, 1))
# ------------------------------
# Weakness Analyzer
# ------------------------------
def analyze_weakness(csv_file):
    """Summarize mean Score per Topic, weakest topics first.

    Args:
        csv_file: Either a gradio file object (exposes ``.name``) or a plain
            filesystem path string. The CSV must have ``Topic`` and ``Score``
            columns.

    Returns:
        A string rendering of the per-topic mean scores, ascending, or a
        friendly message if the file has no rows.
    """
    # BUG FIX: gradio can pass the upload as a bare path string depending on
    # component configuration/version; only dereference .name when present.
    path = getattr(csv_file, "name", csv_file)
    df = pd.read_csv(path)
    if df.empty:
        return "No rows found in the uploaded CSV."
    summary = df.groupby("Topic")["Score"].mean().sort_values()
    return summary.to_string()
# ------------------------------
# Teaching Assistant (Placeholder)
# ------------------------------
def chatbot_response(message, history):
    """Stub chat handler: returns a fixed canned reply regardless of input."""
    _ = (message, history)  # intentionally unused until a real LLM is wired in
    return "This is a placeholder response for now. (LLM not integrated)"
# ------------------------------
# Speech Question Solver (Mock)
# ------------------------------
def speech_answer(audio):
    """Mock handler: this offline build cannot transcribe or answer audio."""
    _ = audio  # accepted only for interface compatibility; never processed
    return "Audio transcription and answer generation not supported offline."
# ------------------------------
# Summarizer
# ------------------------------
def summarize_text(text):
    """Summarize *text* using the shared t5-base model (``summarize:`` task prefix)."""
    prompt = f"summarize: {text.strip()}"
    encoded = tokenizer_qg.encode(prompt, return_tensors="pt", max_length=512, truncation=True)
    # Beam search (2 beams) with a strong length penalty to favor fuller summaries.
    generated = model_qg.generate(encoded, max_length=150, min_length=30, length_penalty=5.0, num_beams=2)
    return tokenizer_qg.decode(generated[0], skip_special_tokens=True)
# ------------------------------
# Engagement Predictor (Mock)
# ------------------------------
def predict_engagement(file):
    """Classify engagement from the mean ``TimeSpent`` in an uploaded CSV.

    Args:
        file: A gradio file object (exposes ``.name``) or a plain path string.

    Returns:
        An engaged/at-risk verdict string; mean time >= 10 counts as engaged.
    """
    # BUG FIX: gradio can pass the upload as a bare path string; only
    # dereference .name when present.
    path = getattr(file, "name", file)
    df = pd.read_csv(path)
    if df.empty:
        # No data → cannot confirm engagement (matches original NaN behavior).
        return "⚠️ Risk of disengagement"
    avg_time = df["TimeSpent"].mean()
    return "βœ… Engaged student" if avg_time >= 10 else "⚠️ Risk of disengagement"
# ------------------------------
# Badge Generator
# ------------------------------
def generate_badge(file):
    """Award a badge tier from the mean ``Score`` in an uploaded CSV.

    Args:
        file: A gradio file object (exposes ``.name``) or a plain path string.

    Returns:
        Gold (mean >= 80), Silver (mean >= 50), or Bronze badge string.
    """
    # BUG FIX: gradio can pass the upload as a bare path string; only
    # dereference .name when present.
    path = getattr(file, "name", file)
    df = pd.read_csv(path)
    avg_score = df["Score"].mean()
    if avg_score >= 80:
        return "πŸ… Gold Badge"
    elif avg_score >= 50:
        return "πŸ₯ˆ Silver Badge"
    else:
        return "πŸ₯‰ Bronze Badge"
# ------------------------------
# Translator (Mock)
# ------------------------------
def translate_text(text, target_lang):
    """Mock translator: echoes the target language; no real translation happens."""
    _ = text  # the mock never inspects the source text
    return "(Translated to {}) - This is a mock translation.".format(target_lang)
# ------------------------------
# Plagiarism Checker
# ------------------------------
# Sentence-embedding model for semantic similarity, loaded once at import
# time; used by check_plagiarism below.
model_plag = SentenceTransformer('all-MiniLM-L6-v2')
def check_plagiarism(text1, text2):
    """Compare two texts by embedding cosine similarity; flag > 0.8 as suspect."""
    vec_a = model_plag.encode(text1, convert_to_tensor=True)
    vec_b = model_plag.encode(text2, convert_to_tensor=True)
    similarity = util.cos_sim(vec_a, vec_b).item()
    verdict = '⚠️ Possible Plagiarism' if similarity > 0.8 else 'βœ… Looks Original'
    return f"Similarity Score: {similarity:.2f} - {verdict}"
# ------------------------------
# Gradio Interface
# ------------------------------
# Build the Gradio UI: one tab per feature, each wiring an input widget and a
# button (or chat interface) to the matching handler defined above.
with gr.Blocks() as demo:
    gr.Markdown("# πŸ“š AI-Powered LMS Suite (Offline Mode)")

    with gr.Tab("🧠 Quiz Generator"):
        qg_content = gr.Textbox(label="Content", lines=5)
        qg_count = gr.Slider(1, 10, value=3, label="Number of Questions")
        qg_button = gr.Button("Generate Quiz")
        qg_result = gr.Textbox(label="Generated Quiz")
        qg_button.click(fn=generate_mcqs, inputs=[qg_content, qg_count], outputs=qg_result)

    with gr.Tab("πŸ“‰ Weakness Analyzer"):
        wa_upload = gr.File(label="Upload CSV with Topic & Score columns")
        wa_button = gr.Button("Analyze")
        wa_result = gr.Textbox(label="Analysis")
        wa_button.click(fn=analyze_weakness, inputs=wa_upload, outputs=wa_result)

    with gr.Tab("πŸ€– Teaching Assistant"):
        # ChatInterface manages its own input/output widgets.
        gr.ChatInterface(fn=chatbot_response)

    with gr.Tab("🎀 Speech Q Solver"):
        sq_audio = gr.Audio(label="Upload Audio", type="filepath")
        sq_button = gr.Button("Get Answer")
        sq_result = gr.Textbox(label="Answer")
        sq_button.click(fn=speech_answer, inputs=sq_audio, outputs=sq_result)

    with gr.Tab("πŸ“„ Summarizer"):
        sm_input = gr.Textbox(lines=5, label="Paste Text")
        sm_button = gr.Button("Summarize")
        sm_result = gr.Textbox(label="Summary")
        sm_button.click(fn=summarize_text, inputs=sm_input, outputs=sm_result)

    with gr.Tab("πŸ“Š Engagement Predictor"):
        ep_upload = gr.File(label="Upload CSV with TimeSpent column")
        ep_button = gr.Button("Predict")
        ep_result = gr.Textbox()
        ep_button.click(fn=predict_engagement, inputs=ep_upload, outputs=ep_result)

    with gr.Tab("πŸ… Badge Generator"):
        bg_upload = gr.File(label="Upload CSV with Score column")
        bg_button = gr.Button("Get Badge")
        bg_result = gr.Textbox()
        bg_button.click(fn=generate_badge, inputs=bg_upload, outputs=bg_result)

    with gr.Tab("🌍 Translator"):
        tr_source = gr.Textbox(label="Enter Text")
        tr_language = gr.Textbox(label="Target Language")
        tr_button = gr.Button("Translate")
        tr_result = gr.Textbox()
        tr_button.click(fn=translate_text, inputs=[tr_source, tr_language], outputs=tr_result)

    with gr.Tab("πŸ“‹ Plagiarism Checker"):
        pc_text_a = gr.Textbox(label="Text 1", lines=3)
        pc_text_b = gr.Textbox(label="Text 2", lines=3)
        pc_button = gr.Button("Check Similarity")
        pc_result = gr.Textbox()
        pc_button.click(fn=check_plagiarism, inputs=[pc_text_a, pc_text_b], outputs=pc_result)

# ------------------------------
# Launch
# ------------------------------
demo.launch()