# smart-lms-suite / app.py
import gradio as gr
import requests
import os
from dotenv import load_dotenv
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sentence_transformers import SentenceTransformer, util
# Load Hugging Face token
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
# ----------------- FEATURE FUNCTIONS -----------------
def generate_quiz(text, num_questions):
    prompt = (
        f"Generate {num_questions} simple quiz questions from the following text:\n\n"
        f"{text}\n\n"
        "Each question should be followed by options A, B, C, D and a correct answer."
    )
    API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 300}
    }
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        result = response.json()
        if isinstance(result, list) and "generated_text" in result[0]:
            output = result[0]["generated_text"].strip()
            return output if output else "⚠️ Model returned empty text. Try reducing question count or using a smaller paragraph."
        else:
            return "⚠️ Model didn't return expected output. Try fewer questions or simpler content."
    except Exception as e:
        return f"⚠️ API Error: {e}"
def get_bot_response(query):
    API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
    payload = {"inputs": f"Student: {query}\nAI:", "parameters": {"max_new_tokens": 150}}
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        return response.json()[0]["generated_text"].split("AI:")[-1].strip()
    except Exception:
        return "⚠️ AI Assistant unavailable right now."
def summarize_text(text):
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    payload = {"inputs": text, "parameters": {"max_length": 120}}
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        return response.json()[0]["summary_text"]
    except Exception:
        return "⚠️ Unable to summarize content."
def translate_text(text, target_lang="te"):
    API_URL = "https://libretranslate.de/translate"
    payload = {
        "q": text,
        "source": "en",
        "target": target_lang,
        "format": "text"
    }
    response = requests.post(API_URL, data=payload)
    try:
        return response.json()["translatedText"]
    except Exception:
        return "⚠️ Translation failed."
def check_plagiarism(text1, text2):
    model = SentenceTransformer('all-MiniLM-L6-v2')
    embeddings = model.encode([text1, text2], convert_to_tensor=True)
    similarity = util.pytorch_cos_sim(embeddings[0], embeddings[1]).item()
    return f"Similarity Score: {similarity:.2f}\n{('⚠️ Possible Plagiarism' if similarity > 0.75 else '✅ No significant overlap')}"
def analyze_weakness(file):
    try:
        df = pd.read_csv(file.name)
        topic_scores = df.groupby('Topic')['Score'].mean().sort_values()
        weakest = topic_scores.head(3)
        return f"Weak Areas:\n{weakest.to_string()}"
    except Exception:
        return "⚠️ Failed to analyze file. Ensure it contains 'Topic' and 'Score' columns."
def predict_engagement(attendance, login_freq, video_watch):
    X = np.array([[attendance, login_freq, video_watch]])
    y = [0, 1, 1, 0, 1]
    X_train = np.array([[90, 5, 80], [85, 4, 90], [95, 6, 85], [60, 2, 40], [88, 3, 75]])
    clf = RandomForestClassifier().fit(X_train, y)
    prediction = clf.predict(X)[0]
    return "✅ Likely to be Engaged" if prediction else "⚠️ At Risk of Disengagement"
def generate_badge(score, speed):
    if score >= 90 and speed <= 30:
        return "🏅 Gold Badge"
    elif score >= 75:
        return "🥈 Silver Badge"
    else:
        return "🥉 Bronze Badge"
# ----------------- UI -----------------
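# Each tab wires one feature function to its inputs/outputs through a
# gr.Button.click handler; all functions return plain strings, so every
# output component is a gr.Textbox.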
with gr.Blocks(title="Smart LMS AI Suite") as app:
    with gr.Tab("🧠 Quiz Generator"):
        quiz_text = gr.Textbox(label="Paste Topic Content")
        quiz_slider = gr.Slider(1, 10, label="Number of Questions", value=3)
        quiz_output = gr.Textbox(label="Generated Quiz")
        quiz_button = gr.Button("Generate Quiz")
        quiz_button.click(fn=generate_quiz, inputs=[quiz_text, quiz_slider], outputs=quiz_output)
    with gr.Tab("🤖 AI Teaching Assistant"):
        bot_input = gr.Textbox(label="Ask a Question")
        bot_output = gr.Textbox(label="AI Answer")
        bot_button = gr.Button("Get Answer")
        bot_button.click(fn=get_bot_response, inputs=bot_input, outputs=bot_output)
    with gr.Tab("📄 Summarizer"):
        sum_input = gr.Textbox(label="Paste Content")
        sum_output = gr.Textbox(label="Summary")
        sum_button = gr.Button("Summarize")
        sum_button.click(fn=summarize_text, inputs=sum_input, outputs=sum_output)
    with gr.Tab("🌍 Translator"):
        trans_input = gr.Textbox(label="Text in English")
        lang_dropdown = gr.Dropdown(["te", "hi", "ta", "fr"], value="te", label="Target Language Code")
        trans_output = gr.Textbox(label="Translated Text")
        trans_button = gr.Button("Translate")
        trans_button.click(fn=translate_text, inputs=[trans_input, lang_dropdown], outputs=trans_output)
    with gr.Tab("🧾 Plagiarism Checker"):
        plag_1 = gr.Textbox(label="Document 1")
        plag_2 = gr.Textbox(label="Document 2")
        plag_out = gr.Textbox(label="Result")
        plag_btn = gr.Button("Check Plagiarism")
        plag_btn.click(fn=check_plagiarism, inputs=[plag_1, plag_2], outputs=plag_out)
    with gr.Tab("📉 Weakness Analyzer"):
        csv_input = gr.File(label="Upload CSV with 'Topic' and 'Score' Columns")
        weak_out = gr.Textbox(label="Weak Topics")
        weak_btn = gr.Button("Analyze")
        weak_btn.click(fn=analyze_weakness, inputs=csv_input, outputs=weak_out)
    with gr.Tab("📊 Engagement Predictor"):
        att = gr.Slider(0, 100, value=85, label="Attendance %")
        login = gr.Slider(0, 10, value=5, label="Login Frequency")
        video = gr.Slider(0, 100, value=80, label="Video Watch %")
        engage_out = gr.Textbox(label="Prediction")
        engage_btn = gr.Button("Predict")
        engage_btn.click(fn=predict_engagement, inputs=[att, login, video], outputs=engage_out)
    with gr.Tab("🏅 Badge Generator"):
        score = gr.Slider(0, 100, label="Score")
        speed = gr.Slider(0, 60, label="Time Taken (mins)")
        badge_out = gr.Textbox(label="Badge Awarded")
        badge_btn = gr.Button("Generate Badge")
        badge_btn.click(fn=generate_badge, inputs=[score, speed], outputs=badge_out)
    gr.Markdown("🚀 Built using Hugging Face, Gradio, and Free APIs")

app.launch()
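
# To run locally (package set inferred from the imports above):
#   pip install gradio requests python-dotenv pandas numpy scikit-learn sentence-transformers
#   python app.py
# Pass share=True to app.launch() if a temporary public Gradio link is needed.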