import gradio as gr
import requests
import os
from dotenv import load_dotenv
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sentence_transformers import SentenceTransformer, util
# Load Hugging Face token
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
# ----------------- FEATURE FUNCTIONS -----------------
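
# Quiz Generator: send the topic text to a hosted T5 question-generation model and
# return the generated question(s); the slider value is echoed in the heading.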
def generate_quiz(text, num_questions):
    API_URL = "https://api-inference.huggingface.co/models/mrm8488/t5-base-finetuned-question-generation-ap"
    payload = {"inputs": f"generate questions: {text}", "parameters": {"max_length": 256}}
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        result = response.json()[0]["generated_text"]
        return f"{int(num_questions)} Sample Questions:\n\n{result}"
    except Exception:
        return "⚠️ Failed to generate quiz. Try again later."
def get_bot_response(query):
    API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
    payload = {"inputs": f"Student: {query}\nAI:", "parameters": {"max_new_tokens": 150}}
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        return response.json()[0]["generated_text"].split("AI:")[-1].strip()
    except Exception:
        return "⚠️ AI Assistant unavailable right now."
def summarize_text(text):
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    payload = {"inputs": text, "parameters": {"max_length": 120}}
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        return response.json()[0]["summary_text"]
    except Exception:
        return "⚠️ Unable to summarize content."
def translate_text(text, target_lang="te"):
    API_URL = "https://libretranslate.de/translate"
    payload = {
        "q": text,
        "source": "en",
        "target": target_lang,
        "format": "text"
    }
    response = requests.post(API_URL, data=payload)
    try:
        return response.json()["translatedText"]
    except Exception:
        return "⚠️ Translation failed."
def check_plagiarism(text1, text2):
    model = SentenceTransformer('all-MiniLM-L6-v2')
    embeddings = model.encode([text1, text2], convert_to_tensor=True)
    similarity = util.pytorch_cos_sim(embeddings[0], embeddings[1]).item()
    return f"Similarity Score: {similarity:.2f}\n{('⚠️ Possible Plagiarism' if similarity > 0.75 else '✅ No significant overlap')}"
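
# Weakness Analyzer: average the 'Score' column per 'Topic' and report the three
# lowest-scoring topics.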
def analyze_weakness(file):
    try:
        # gr.File may pass a filepath string or a tempfile-like object depending on the Gradio version
        path = file.name if hasattr(file, "name") else file
        df = pd.read_csv(path)
        topic_scores = df.groupby('Topic')['Score'].mean().sort_values()
        weakest = topic_scores.head(3)
        return f"Weak Areas:\n{weakest.to_string()}"
    except Exception:
        return "⚠️ Failed to analyze file. Ensure it contains 'Topic' and 'Score' columns."
def predict_engagement(attendance, login_freq, video_watch):
    X = np.array([[attendance, login_freq, video_watch]])
    y = [0, 1, 1, 0, 1]
    X_train = np.array([[90, 5, 80], [85, 4, 90], [95, 6, 85], [60, 2, 40], [88, 3, 75]])
    clf = RandomForestClassifier().fit(X_train, y)
    prediction = clf.predict(X)[0]
    return "✅ Likely to be Engaged" if prediction else "⚠️ At Risk of Disengagement"
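
# Badge Generator: award Gold for score >= 90 within 30 minutes, Silver for
# score >= 75, otherwise Bronze.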
def generate_badge(score, speed):
    if score >= 90 and speed <= 30:
        return "🏅 Gold Badge"
    elif score >= 75:
        return "🥈 Silver Badge"
    else:
        return "🥉 Bronze Badge"
# ----------------- UI -----------------
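# Each tab wires its input widgets to one feature function through a Button.click() callback.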
with gr.Blocks(title="Smart LMS AI Suite") as app:
    with gr.Tab("🧠 Quiz Generator"):
        quiz_text = gr.Textbox(label="Paste Topic Content")
        quiz_slider = gr.Slider(1, 10, label="Number of Questions", value=3)
        quiz_output = gr.Textbox(label="Generated Quiz")
        quiz_button = gr.Button("Generate Quiz")
        quiz_button.click(fn=generate_quiz, inputs=[quiz_text, quiz_slider], outputs=quiz_output)
    with gr.Tab("🤖 AI Teaching Assistant"):
        bot_input = gr.Textbox(label="Ask a Question")
        bot_output = gr.Textbox(label="AI Answer")
        bot_button = gr.Button("Get Answer")
        bot_button.click(fn=get_bot_response, inputs=bot_input, outputs=bot_output)
    with gr.Tab("📝 Summarizer"):
        sum_input = gr.Textbox(label="Paste Content")
        sum_output = gr.Textbox(label="Summary")
        sum_button = gr.Button("Summarize")
        sum_button.click(fn=summarize_text, inputs=sum_input, outputs=sum_output)
    with gr.Tab("🌐 Translator"):
        trans_input = gr.Textbox(label="Text in English")
        lang_dropdown = gr.Dropdown(["te", "hi", "ta", "fr"], value="te", label="Target Language Code")
        trans_output = gr.Textbox(label="Translated Text")
        trans_button = gr.Button("Translate")
        trans_button.click(fn=translate_text, inputs=[trans_input, lang_dropdown], outputs=trans_output)
    with gr.Tab("🧾 Plagiarism Checker"):
        plag_1 = gr.Textbox(label="Document 1")
        plag_2 = gr.Textbox(label="Document 2")
        plag_out = gr.Textbox(label="Result")
        plag_btn = gr.Button("Check Plagiarism")
        plag_btn.click(fn=check_plagiarism, inputs=[plag_1, plag_2], outputs=plag_out)
    with gr.Tab("📊 Weakness Analyzer"):
        csv_input = gr.File(label="Upload CSV with 'Topic' and 'Score' Columns")
        weak_out = gr.Textbox(label="Weak Topics")
        weak_btn = gr.Button("Analyze")
        weak_btn.click(fn=analyze_weakness, inputs=csv_input, outputs=weak_out)
    with gr.Tab("📈 Engagement Predictor"):
        att = gr.Slider(0, 100, value=85, label="Attendance %")
        login = gr.Slider(0, 10, value=5, label="Login Frequency")
        video = gr.Slider(0, 100, value=80, label="Video Watch %")
        engage_out = gr.Textbox(label="Prediction")
        engage_btn = gr.Button("Predict")
        engage_btn.click(fn=predict_engagement, inputs=[att, login, video], outputs=engage_out)
    with gr.Tab("🏅 Badge Generator"):
        score = gr.Slider(0, 100, label="Score")
        speed = gr.Slider(0, 60, label="Time Taken (mins)")
        badge_out = gr.Textbox(label="Badge Awarded")
        badge_btn = gr.Button("Generate Badge")
        badge_btn.click(fn=generate_badge, inputs=[score, speed], outputs=badge_out)
    gr.Markdown("🚀 Built using Hugging Face, Gradio, and Free APIs")
app.launch()