File size: 6,852 Bytes
f678735
5dfba26
 
 
 
 
 
 
 
481fdd4
5dfba26
 
 
 
481fdd4
 
5dfba26
7209ab6
 
 
 
 
9217968
f52d1cb
 
 
7209ab6
f52d1cb
9217968
5dfba26
7209ab6
5dfba26
9217968
 
7209ab6
 
9217968
7209ab6
f52d1cb
7209ab6
 
9217968
f52d1cb
5dfba26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
481fdd4
5dfba26
 
 
 
 
 
 
 
 
 
 
 
 
481fdd4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
import gradio as gr
import requests
import os
from dotenv import load_dotenv
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sentence_transformers import SentenceTransformer, util

# Load Hugging Face token
# Reads HF_TOKEN from a local .env file (via python-dotenv) or the process
# environment, and builds the bearer-auth header reused by every Hugging Face
# inference call below. If HF_TOKEN is unset this becomes "Bearer None" and
# API calls will fail with an auth error.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# ----------------- FEATURE FUNCTIONS -----------------

def generate_quiz(text, num_questions):
    """Generate multiple-choice quiz questions from study text.

    Calls the flan-t5-small model on the Hugging Face inference API.

    Args:
        text: Source material to build questions from.
        num_questions: Number of questions to request.

    Returns:
        The generated quiz text, or a user-facing warning string on failure.
    """
    prompt = (
        f"Generate {num_questions} simple quiz questions from the following text:\n\n"
        f"{text}\n\n"
        f"Each question should be followed by options A, B, C, D and a correct answer."
    )

    API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-small"
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 300}
    }

    try:
        # timeout so a hung endpoint can't block the UI indefinitely
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        result = response.json()
        # Success responses are a non-empty list of dicts; error payloads are
        # dicts — guard against an empty list before indexing result[0].
        if isinstance(result, list) and result and "generated_text" in result[0]:
            output = result[0]["generated_text"].strip()
            return output if output else "⚠️ Model returned empty text. Try reducing question count or using a smaller paragraph."
        else:
            return "⚠️ Model didn't return expected output. Try fewer questions or simpler content."
    except (requests.RequestException, ValueError, KeyError, IndexError, TypeError) as e:
        return f"⚠️ API Error: {e}"




def get_bot_response(query):
    """Answer a student question with the zephyr-7b-beta chat model.

    Args:
        query: The student's question text.

    Returns:
        The model's reply (text after the final "AI:" marker in the echoed
        prompt), or a fallback warning string on any API failure.
    """
    API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
    payload = {"inputs": f"Student: {query}\nAI:", "parameters": {"max_new_tokens": 150}}
    try:
        # Request moved inside the try (network errors previously escaped);
        # timeout prevents the UI from hanging on a stalled endpoint.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        return response.json()[0]["generated_text"].split("AI:")[-1].strip()
    except (requests.RequestException, ValueError, KeyError, IndexError, TypeError):
        # Narrowed from a bare except: no longer swallows KeyboardInterrupt.
        return "⚠️ AI Assistant unavailable right now."

def summarize_text(text):
    """Summarize content with facebook/bart-large-cnn on the HF API.

    Args:
        text: The content to summarize.

    Returns:
        The summary string, or a warning string on any API failure.
    """
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    payload = {"inputs": text, "parameters": {"max_length": 120}}
    try:
        # Request moved inside the try (network errors previously escaped);
        # timeout prevents an indefinite hang.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        return response.json()[0]["summary_text"]
    except (requests.RequestException, ValueError, KeyError, IndexError, TypeError):
        # Narrowed from a bare except: no longer swallows KeyboardInterrupt.
        return "⚠️ Unable to summarize content."

def translate_text(text, target_lang="te"):
    """Translate English text via the public LibreTranslate endpoint.

    Args:
        text: English source text.
        target_lang: ISO 639-1 target language code (default Telugu).

    Returns:
        The translated text, or a warning string on any API failure.
    """
    API_URL = "https://libretranslate.de/translate"
    payload = {
        "q": text,
        "source": "en",
        "target": target_lang,
        "format": "text"
    }
    try:
        # Request moved inside the try (network errors previously escaped);
        # timeout prevents an indefinite hang.
        response = requests.post(API_URL, data=payload, timeout=60)
        return response.json()["translatedText"]
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Narrowed from a bare except: no longer swallows KeyboardInterrupt.
        return "⚠️ Translation failed."

def check_plagiarism(text1, text2):
    """Score semantic similarity between two documents.

    Embeds both texts with a MiniLM sentence encoder and compares them by
    cosine similarity; a score above 0.75 is flagged as possible plagiarism.

    Args:
        text1: First document.
        text2: Second document.

    Returns:
        A result string containing the similarity score and a verdict.
    """
    # Load the embedding model once and cache it on the function object —
    # the original reloaded the full model on every button click.
    model = getattr(check_plagiarism, "_model", None)
    if model is None:
        model = SentenceTransformer('all-MiniLM-L6-v2')
        check_plagiarism._model = model
    embeddings = model.encode([text1, text2], convert_to_tensor=True)
    similarity = util.pytorch_cos_sim(embeddings[0], embeddings[1]).item()
    return f"Similarity Score: {similarity:.2f}\n{('⚠️ Possible Plagiarism' if similarity > 0.75 else 'βœ… No significant overlap')}"

def analyze_weakness(file):
    """Report a student's three weakest topics from an uploaded CSV.

    The CSV must contain 'Topic' and 'Score' columns; scores are averaged
    per topic and the three lowest means are returned.

    Args:
        file: An uploaded-file object exposing a `.name` filesystem path
              (Gradio's File component provides this).

    Returns:
        A "Weak Areas" report string, or a warning string if the file is
        missing, unparseable, or lacks the required columns.
    """
    try:
        df = pd.read_csv(file.name)
        topic_scores = df.groupby('Topic')['Score'].mean().sort_values()
        weakest = topic_scores.head(3)
        return f"Weak Areas:\n{weakest.to_string()}"
    # Narrowed from a bare except: covers missing file (OSError), parse
    # failures (ValueError subsumes pandas' EmptyDataError/ParserError via
    # their bases), missing columns (KeyError), and a file arg without
    # `.name` (AttributeError) — but no longer hides unrelated bugs.
    except (OSError, ValueError, KeyError, AttributeError):
        return "⚠️ Failed to analyze file. Ensure it contains 'Topic' and 'Score' columns."

def predict_engagement(attendance, login_freq, video_watch):
    """Predict whether a student is engaged from simple activity metrics.

    Args:
        attendance: Attendance percentage (0-100).
        login_freq: Logins per period (0-10).
        video_watch: Video watch percentage (0-100).

    Returns:
        An "engaged" or "at risk" verdict string.

    NOTE(review): the classifier is trained on a tiny hard-coded sample —
    a placeholder until real LMS data is wired in.
    """
    clf = getattr(predict_engagement, "_clf", None)
    if clf is None:
        # Train once and cache on the function object — the original refit
        # the forest on every call. Feature order: [attendance, login_freq,
        # video_watch]. A fixed random_state makes predictions reproducible
        # (an unseeded RandomForest can flip verdicts between runs).
        X_train = np.array([[90, 5, 80], [85, 4, 90], [95, 6, 85], [60, 2, 40], [88, 3, 75]])
        y_train = [0, 1, 1, 0, 1]
        clf = RandomForestClassifier(random_state=42).fit(X_train, y_train)
        predict_engagement._clf = clf
    X = np.array([[attendance, login_freq, video_watch]])
    prediction = clf.predict(X)[0]
    return "βœ… Likely to be Engaged" if prediction else "⚠️ At Risk of Disengagement"

def generate_badge(score, speed):
    """Award a badge tier from a quiz score and completion time.

    Gold requires a score of at least 90 finished within 30 minutes;
    Silver requires a score of at least 75; anything else earns Bronze.
    """
    earned_gold = score >= 90 and speed <= 30
    if earned_gold:
        return "πŸ… Gold Badge"
    if score >= 75:
        return "πŸ₯ˆ Silver Badge"
    return "πŸ₯‰ Bronze Badge"

# ----------------- UI -----------------
# Gradio front end: one tab per feature. Each tab wires its input widgets
# to the matching handler defined above via Button.click. Tab labels and
# outputs contain emoji that render garbled here (mojibake in the source
# encoding) — left byte-identical on purpose.
with gr.Blocks(title="Smart LMS AI Suite") as app:

    # Quiz generation from pasted course content (HF flan-t5-small).
    with gr.Tab("🧠 Quiz Generator"):
        quiz_text = gr.Textbox(label="Paste Topic Content")
        quiz_slider = gr.Slider(1, 10, label="Number of Questions", value=3)
        quiz_output = gr.Textbox(label="Generated Quiz")
        quiz_button = gr.Button("Generate Quiz")
        quiz_button.click(fn=generate_quiz, inputs=[quiz_text, quiz_slider], outputs=quiz_output)

    # Free-form Q&A chat (HF zephyr-7b-beta).
    with gr.Tab("πŸ€– AI Teaching Assistant"):
        bot_input = gr.Textbox(label="Ask a Question")
        bot_output = gr.Textbox(label="AI Answer")
        bot_button = gr.Button("Get Answer")
        bot_button.click(fn=get_bot_response, inputs=bot_input, outputs=bot_output)

    # Abstractive summarization (HF bart-large-cnn).
    with gr.Tab("πŸ“„ Summarizer"):
        sum_input = gr.Textbox(label="Paste Content")
        sum_output = gr.Textbox(label="Summary")
        sum_button = gr.Button("Summarize")
        sum_button.click(fn=summarize_text, inputs=sum_input, outputs=sum_output)

    # English -> selected language via LibreTranslate.
    with gr.Tab("🌍 Translator"):
        trans_input = gr.Textbox(label="Text in English")
        lang_dropdown = gr.Dropdown(["te", "hi", "ta", "fr"], value="te", label="Target Language Code")
        trans_output = gr.Textbox(label="Translated Text")
        trans_button = gr.Button("Translate")
        trans_button.click(fn=translate_text, inputs=[trans_input, lang_dropdown], outputs=trans_output)

    # Pairwise semantic-similarity check (sentence-transformers).
    with gr.Tab("🧾 Plagiarism Checker"):
        plag_1 = gr.Textbox(label="Document 1")
        plag_2 = gr.Textbox(label="Document 2")
        plag_out = gr.Textbox(label="Result")
        plag_btn = gr.Button("Check Plagiarism")
        plag_btn.click(fn=check_plagiarism, inputs=[plag_1, plag_2], outputs=plag_out)

    # CSV upload -> per-topic mean scores, three weakest reported.
    with gr.Tab("πŸ“‰ Weakness Analyzer"):
        csv_input = gr.File(label="Upload CSV with 'Topic' and 'Score' Columns")
        weak_out = gr.Textbox(label="Weak Topics")
        weak_btn = gr.Button("Analyze")
        weak_btn.click(fn=analyze_weakness, inputs=csv_input, outputs=weak_out)

    # Toy RandomForest classifier over three activity sliders.
    with gr.Tab("πŸ“Š Engagement Predictor"):
        att = gr.Slider(0, 100, value=85, label="Attendance %")
        login = gr.Slider(0, 10, value=5, label="Login Frequency")
        video = gr.Slider(0, 100, value=80, label="Video Watch %")
        engage_out = gr.Textbox(label="Prediction")
        engage_btn = gr.Button("Predict")
        engage_btn.click(fn=predict_engagement, inputs=[att, login, video], outputs=engage_out)

    # Rule-based gamification badge from score/time sliders.
    with gr.Tab("πŸ… Badge Generator"):
        score = gr.Slider(0, 100, label="Score")
        speed = gr.Slider(0, 60, label="Time Taken (mins)")
        badge_out = gr.Textbox(label="Badge Awarded")
        badge_btn = gr.Button("Generate Badge")
        badge_btn.click(fn=generate_badge, inputs=[score, speed], outputs=badge_out)

    gr.Markdown("πŸš€ Built using Hugging Face, Gradio, and Free APIs")

# Start the Gradio server (blocking call; module is run as a script).
app.launch()