# AI-Powered LMS Suite — offline Gradio demo
import gradio as gr
import pandas as pd
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration
from sentence_transformers import SentenceTransformer, util
# ------------------------------
# Offline Quiz Generator
# ------------------------------
# Load the shared local T5 checkpoint once at import time; the same
# model/tokenizer pair backs both the quiz-generator and summarizer tabs.
tokenizer_qg = T5Tokenizer.from_pretrained("t5-base")
model_qg = T5ForConditionalGeneration.from_pretrained("t5-base")
def generate_mcqs(text, num_questions=3):
    """Generate questions from *text* using the local T5 model.

    Args:
        text: Source passage to generate questions from.
        num_questions: Number of candidate questions to produce
            (Gradio sliders may pass a float, so it is coerced to int).

    Returns:
        The generated question(s) as a single newline-joined string.
    """
    n = max(1, int(num_questions))  # slider values can arrive as floats
    input_text = f"generate questions: {text}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    # BUG FIX: num_questions was previously ignored (num_return_sequences was
    # hard-coded to 1). generate() requires num_beams >= num_return_sequences
    # to emit multiple distinct sequences with beam search.
    outputs = model_qg.generate(input_ids=input_ids, max_length=256, num_beams=n, num_return_sequences=n)
    return "\n".join(tokenizer_qg.decode(seq, skip_special_tokens=True).strip() for seq in outputs)
# ------------------------------
# Weakness Analyzer
# ------------------------------
def analyze_weakness(csv_file):
    """Report the mean Score per Topic, weakest topics first.

    Args:
        csv_file: Either a Gradio upload wrapper exposing ``.name`` or a
            plain filesystem path string to a CSV with Topic/Score columns.

    Returns:
        A plain-text table of mean scores sorted ascending, or an
        explanatory error message if the required columns are absent.
    """
    # Robustness: Gradio versions differ on whether a wrapper object or a
    # bare path string is delivered; accept both.
    path = getattr(csv_file, "name", csv_file)
    df = pd.read_csv(path)
    if "Topic" not in df.columns or "Score" not in df.columns:
        return "CSV must contain 'Topic' and 'Score' columns."
    summary = df.groupby("Topic")["Score"].mean().sort_values()
    return summary.to_string()
# ------------------------------
# Teaching Assistant (Placeholder)
# ------------------------------
def chatbot_response(message, history):
    """Placeholder chat handler for the Teaching Assistant tab.

    Args:
        message: Latest user message (unused).
        history: Prior conversation turns (unused).

    Returns:
        A fixed canned reply; no model is consulted.
    """
    canned_reply = "This is a placeholder response for now. (LLM not integrated)"
    return canned_reply
# ------------------------------
# Speech Question Solver (Mock)
# ------------------------------
def speech_answer(audio):
    """Mock handler for the Speech Q Solver tab.

    Args:
        audio: Uploaded audio file path (unused).

    Returns:
        A fixed notice that offline transcription is unavailable.
    """
    notice = "Audio transcription and answer generation not supported offline."
    return notice
# ------------------------------
# Summarizer
# ------------------------------
def summarize_text(text):
    """Summarize *text* with the shared local T5 model.

    Args:
        text: Text to summarize; may be None or blank.

    Returns:
        The decoded summary, or an empty string for blank input
        (robustness: avoids prompting the model with nothing).
    """
    cleaned = text.strip() if text else ""
    if not cleaned:
        return ""
    input_text = f"summarize: {cleaned}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    summary_ids = model_qg.generate(input_ids, max_length=150, min_length=30, length_penalty=5.0, num_beams=2)
    return tokenizer_qg.decode(summary_ids[0], skip_special_tokens=True)
# ------------------------------
# Engagement Predictor (Mock)
# ------------------------------
def predict_engagement(file):
    """Classify a student's engagement from a CSV of time-on-task values.

    Args:
        file: Gradio upload wrapper (has ``.name``) or a plain path string
            to a CSV with a ``TimeSpent`` column.

    Returns:
        A short verdict string; a mean TimeSpent of >= 10 counts as engaged.
        NOTE(review): the units of TimeSpent are not visible here — presumed
        minutes; confirm against the data producer.
    """
    path = getattr(file, "name", file)  # accept wrapper or bare path
    df = pd.read_csv(path)
    if "TimeSpent" not in df.columns:
        return "⚠️ CSV must contain a TimeSpent column."
    avg_time = df["TimeSpent"].mean()
    # BUG FIX: the return literal was split across two physical lines in the
    # original source (a syntax error from a mangled paste); restored as a
    # single literal with the intended emoji.
    return "✅ Engaged student" if avg_time >= 10 else "⚠️ Risk of disengagement"
# ------------------------------
# Badge Generator
# ------------------------------
def generate_badge(file):
    """Award a badge tier based on the mean Score in an uploaded CSV.

    Tiers: mean >= 80 -> gold, mean >= 50 -> silver, otherwise bronze.

    Args:
        file: Gradio upload wrapper (has ``.name``) or a plain path string
            to a CSV with a ``Score`` column.

    Returns:
        A badge label string for the matching tier.
    """
    path = getattr(file, "name", file)  # accept wrapper or bare path
    df = pd.read_csv(path)
    avg_score = df["Score"].mean()
    # BUG FIX: the gold-badge literal was split across two physical lines in
    # the original source (a syntax error); all three labels restored with
    # the intended medal emoji in place of mojibake.
    if avg_score >= 80:
        return "🥇 Gold Badge"
    elif avg_score >= 50:
        return "🥈 Silver Badge"
    else:
        return "🥉 Bronze Badge"
# ------------------------------
# Translator (Mock)
# ------------------------------
def translate_text(text, target_lang):
    """Mock translator for the Translator tab.

    Args:
        text: Text to "translate" (unused in this mock).
        target_lang: Language name echoed back in the canned reply.

    Returns:
        A fixed message naming the requested target language.
    """
    return "(Translated to {}) - This is a mock translation.".format(target_lang)
# ------------------------------
# Plagiarism Checker
# ------------------------------
# Small local sentence-embedding model, loaded once; used only by the
# plagiarism checker below for cosine-similarity scoring.
model_plag = SentenceTransformer('all-MiniLM-L6-v2')
def check_plagiarism(text1, text2):
    """Score the semantic similarity of two texts via sentence embeddings.

    Args:
        text1: First text to compare.
        text2: Second text to compare.

    Returns:
        A message containing the cosine similarity (2 decimal places);
        scores above 0.8 are flagged as possible plagiarism.
    """
    emb1 = model_plag.encode(text1, convert_to_tensor=True)
    emb2 = model_plag.encode(text2, convert_to_tensor=True)
    score = util.cos_sim(emb1, emb2).item()
    # BUG FIX: the verdict literal was split across two physical lines in
    # the original source (a syntax error); restored as a single literal
    # with the intended emoji in place of mojibake.
    verdict = "⚠️ Possible Plagiarism" if score > 0.8 else "✅ Looks Original"
    return f"Similarity Score: {score:.2f} - {verdict}"
# ------------------------------
# Gradio Interface
# ------------------------------
# BUG FIX: the "Badge Generator" gr.Tab label was split across two physical
# lines in the original source (a syntax error from a mangled paste), and
# every tab label contained mojibake; all labels restored as single-line
# literals with plausible original emoji.
with gr.Blocks() as demo:
    gr.Markdown("# 📚 AI-Powered LMS Suite (Offline Mode)")

    with gr.Tab("🧠 Quiz Generator"):
        quiz_text = gr.Textbox(label="Content", lines=5)
        quiz_slider = gr.Slider(1, 10, value=3, label="Number of Questions")
        quiz_btn = gr.Button("Generate Quiz")
        quiz_out = gr.Textbox(label="Generated Quiz")
        quiz_btn.click(fn=generate_mcqs, inputs=[quiz_text, quiz_slider], outputs=quiz_out)

    with gr.Tab("📊 Weakness Analyzer"):
        weak_file = gr.File(label="Upload CSV with Topic & Score columns")
        weak_btn = gr.Button("Analyze")
        weak_out = gr.Textbox(label="Analysis")
        weak_btn.click(fn=analyze_weakness, inputs=weak_file, outputs=weak_out)

    with gr.Tab("🤖 Teaching Assistant"):
        gr.ChatInterface(fn=chatbot_response)

    with gr.Tab("🎤 Speech Q Solver"):
        audio_in = gr.Audio(label="Upload Audio", type="filepath")
        audio_btn = gr.Button("Get Answer")
        audio_out = gr.Textbox(label="Answer")
        audio_btn.click(fn=speech_answer, inputs=audio_in, outputs=audio_out)

    with gr.Tab("📝 Summarizer"):
        sum_text = gr.Textbox(lines=5, label="Paste Text")
        sum_btn = gr.Button("Summarize")
        sum_out = gr.Textbox(label="Summary")
        sum_btn.click(fn=summarize_text, inputs=sum_text, outputs=sum_out)

    with gr.Tab("📈 Engagement Predictor"):
        eng_file = gr.File(label="Upload CSV with TimeSpent column")
        eng_btn = gr.Button("Predict")
        eng_out = gr.Textbox()
        eng_btn.click(fn=predict_engagement, inputs=eng_file, outputs=eng_out)

    with gr.Tab("🏅 Badge Generator"):
        badge_file = gr.File(label="Upload CSV with Score column")
        badge_btn = gr.Button("Get Badge")
        badge_out = gr.Textbox()
        badge_btn.click(fn=generate_badge, inputs=badge_file, outputs=badge_out)

    with gr.Tab("🌐 Translator"):
        trans_in = gr.Textbox(label="Enter Text")
        trans_lang = gr.Textbox(label="Target Language")
        trans_btn = gr.Button("Translate")
        trans_out = gr.Textbox()
        trans_btn.click(fn=translate_text, inputs=[trans_in, trans_lang], outputs=trans_out)

    with gr.Tab("🔍 Plagiarism Checker"):
        text1 = gr.Textbox(label="Text 1", lines=3)
        text2 = gr.Textbox(label="Text 2", lines=3)
        plag_btn = gr.Button("Check Similarity")
        plag_out = gr.Textbox()
        plag_btn.click(fn=check_plagiarism, inputs=[text1, text2], outputs=plag_out)
# ------------------------------
# Launch
# ------------------------------
demo.launch()  # start the local Gradio server (blocks until interrupted)