""import gradio as gr
import pandas as pd
import matplotlib.pyplot as plt
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration
from sentence_transformers import SentenceTransformer, util
import numpy as np
# ------------------------------
# Offline Quiz Generator
# ------------------------------
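# Note: from_pretrained() downloads t5-base from the Hugging Face Hub on first run;
# pre-cache the weights (and optionally set TRANSFORMERS_OFFLINE=1) for truly offline use.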
model_qg = T5ForConditionalGeneration.from_pretrained("t5-base")
tokenizer_qg = T5Tokenizer.from_pretrained("t5-base")

def generate_mcqs(text, num_questions=3):
    input_text = f"generate questions: {text}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    # Beam search returns one candidate per requested question, so the slider value is actually used.
    outputs = model_qg.generate(input_ids=input_ids, max_length=256,
                                num_beams=max(int(num_questions), 2),
                                num_return_sequences=int(num_questions))
    return "\n\n".join(tokenizer_qg.decode(out, skip_special_tokens=True).strip() for out in outputs)
# ------------------------------
# Weakness Analyzer
# ------------------------------
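# Expects a CSV with at least "Topic" and "Score" columns, e.g.:
#   Topic,Score
#   Algebra,45
#   Geometry,82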
def analyze_weakness(csv_file):
    df = pd.read_csv(csv_file.name)
    # Mean score per topic, weakest topics first.
    summary = df.groupby("Topic")["Score"].mean().sort_values()
    return summary.to_string()
# ------------------------------
# Teaching Assistant
# ------------------------------
def chatbot_response(message, history):
    return "This is a placeholder response for now. (LLM not integrated)"
# ------------------------------
# Speech Question Solver
# ------------------------------
def speech_answer(audio):
    return "Audio-to-text transcription and answer generation are not included in the offline version."
# ------------------------------
# PDF/YT Summarizer
# ------------------------------
def summarize_text(text):
    input_text = f"summarize: {text.strip()}"
    input_ids = tokenizer_qg.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    summary_ids = model_qg.generate(input_ids, max_length=150, min_length=30, length_penalty=5.0, num_beams=2)
    return tokenizer_qg.decode(summary_ids[0], skip_special_tokens=True)
# ------------------------------
# Engagement Predictor (Mock)
# ------------------------------
def predict_engagement(file):
    df = pd.read_csv(file.name)
    avg_time = df['TimeSpent'].mean()
    if avg_time < 10:
        return "⚠️ Risk of disengagement"
    else:
        return "✅ Engaged student"
# ------------------------------
# Badge Generator
# ------------------------------
def generate_badge(file):
    df = pd.read_csv(file.name)
    avg_score = df['Score'].mean()
    if avg_score >= 80:
        return "🥇 Gold Badge"
    elif avg_score >= 50:
        return "🥈 Silver Badge"
    else:
        return "🥉 Bronze Badge"
# ------------------------------
# Translator (Mock - offline)
# ------------------------------
def translate_text(text, target_lang):
    return f"(Translated to {target_lang}) - This is a mock translation."
# ------------------------------
# Plagiarism Checker
# ------------------------------
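# all-MiniLM-L6-v2 is likewise fetched from the Hub on first use; pre-cache it for offline runs.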
model_plag = SentenceTransformer('all-MiniLM-L6-v2')

def check_plagiarism(text1, text2):
    emb1 = model_plag.encode(text1, convert_to_tensor=True)
    emb2 = model_plag.encode(text2, convert_to_tensor=True)
    score = util.cos_sim(emb1, emb2).item()
    # A cosine similarity above 0.8 is only a rough heuristic, not a definitive plagiarism test.
    verdict = "⚠️ Possible Plagiarism" if score > 0.8 else "✅ Looks Original"
    return f"Similarity Score: {score:.2f} - {verdict}"
# ------------------------------
# Gradio UI
# ------------------------------
with gr.Blocks() as demo:
gr.Markdown("# π AI-Powered LMS Suite (Offline Mode)")
```
with gr.Tab("π§ Quiz Generator"):
quiz_text = gr.Textbox(label="Content", lines=5)
quiz_slider = gr.Slider(1, 10, value=3, label="Number of Questions")
quiz_btn = gr.Button("Generate Quiz")
quiz_out = gr.Textbox(label="Generated Quiz")
quiz_btn.click(fn=generate_mcqs, inputs=[quiz_text, quiz_slider], outputs=quiz_out)
with gr.Tab("π Weakness Analyzer"):
weak_file = gr.File(label="Upload CSV with Topic & Score columns")
weak_btn = gr.Button("Analyze")
weak_out = gr.Textbox(label="Analysis")
weak_btn.click(fn=analyze_weakness, inputs=weak_file, outputs=weak_out)
with gr.Tab("π€ Teaching Assistant"):
chat = gr.ChatInterface(fn=chatbot_response)
with gr.Tab("π€ Speech Q Solver"):
audio_in = gr.Audio(source="microphone", type="filepath")
audio_btn = gr.Button("Answer")
audio_out = gr.Textbox()
audio_btn.click(fn=speech_answer, inputs=audio_in, outputs=audio_out)
with gr.Tab("π Summarizer"):
sum_text = gr.Textbox(lines=5, label="Paste Text")
sum_btn = gr.Button("Summarize")
sum_out = gr.Textbox(label="Summary")
sum_btn.click(fn=summarize_text, inputs=sum_text, outputs=sum_out)
with gr.Tab("π Engagement Predictor"):
eng_file = gr.File(label="Upload CSV with TimeSpent column")
eng_btn = gr.Button("Predict")
eng_out = gr.Textbox()
eng_btn.click(fn=predict_engagement, inputs=eng_file, outputs=eng_out)
with gr.Tab("π
Badge Generator"):
badge_file = gr.File(label="Upload CSV with Score column")
badge_btn = gr.Button("Get Badge")
badge_out = gr.Textbox()
badge_btn.click(fn=generate_badge, inputs=badge_file, outputs=badge_out)
with gr.Tab("π Translator"):
trans_in = gr.Textbox(label="Enter Text")
trans_lang = gr.Textbox(label="Target Language")
trans_btn = gr.Button("Translate")
trans_out = gr.Textbox()
trans_btn.click(fn=translate_text, inputs=[trans_in, trans_lang], outputs=trans_out)
with gr.Tab("π Plagiarism Checker"):
text1 = gr.Textbox(label="Text 1", lines=3)
text2 = gr.Textbox(label="Text 2", lines=3)
plag_btn = gr.Button("Check Similarity")
plag_out = gr.Textbox()
plag_btn.click(fn=check_plagiarism, inputs=[text1, text2], outputs=plag_out)
# Launch app
demo.launch()
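# Serves locally (default http://127.0.0.1:7860); no internet needed once model weights are cached.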