Update app.py
app.py CHANGED
@@ -1,16 +1,21 @@
 # app.py
 import gradio as gr
 from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
+import torch
 
 # ====== Load AI models ======
 # Summarization model (BART)
 summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
 
-# Question
-# Manually load tokenizer to avoid SentencePiece fast tokenizer issues
+# ====== Question / Flashcard model (T5 small) ======
 tokenizer = AutoTokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
 model = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
-
+
+# ====== Helper function for manual generation ======
+def generate_questions(text, max_length=128):
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    outputs = model.generate(**inputs, max_length=max_length)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 # ====== Summarizer Function ======
 def summarize_text(text, mode):
@@ -31,15 +36,13 @@ def summarize_text(text, mode):
 def generate_quiz(text):
     if not text.strip():
         return "⚠️ Please provide some notes!"
-
-    return output[0]['generated_text']
+    return generate_questions(f"generate questions: {text}")
 
 # ====== Flashcards Generator ======
 def generate_flashcards(text):
     if not text.strip():
         return "⚠️ Please provide some notes!"
-
-    return output[0]['generated_text']
+    return generate_questions(f"generate flashcards: {text}")
 
 # ====== Gradio Interface ======
 with gr.Blocks() as demo: