Shivangi2005 committed on
Commit
0991b38
·
verified ·
1 Parent(s): 3615155

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -1,16 +1,21 @@
1
  # app.py
2
  import gradio as gr
3
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
 
4
 
5
  # ====== Load AI models ======
6
  # Summarization model (BART)
7
  summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
8
 
9
- # Question generation model (T5 small, CPU-friendly)
10
- # Manually load tokenizer to avoid SentencePiece fast tokenizer issues
11
  tokenizer = AutoTokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
12
  model = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
13
- question_generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
 
 
 
 
14
 
15
  # ====== Summarizer Function ======
16
  def summarize_text(text, mode):
@@ -31,15 +36,13 @@ def summarize_text(text, mode):
31
  def generate_quiz(text):
32
  if not text.strip():
33
  return "⚠️ Please provide some notes!"
34
- output = question_generator(f"generate questions: {text}", max_length=256)
35
- return output[0]['generated_text']
36
 
37
  # ====== Flashcards Generator ======
38
  def generate_flashcards(text):
39
  if not text.strip():
40
  return "⚠️ Please provide some notes!"
41
- output = question_generator(f"generate flashcards: {text}", max_length=256)
42
- return output[0]['generated_text']
43
 
44
  # ====== Gradio Interface ======
45
  with gr.Blocks() as demo:
 
1
  # app.py
2
  import gradio as gr
3
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
4
+ import torch
5
 
6
  # ====== Load AI models ======
7
  # Summarization model (BART)
8
  summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
9
 
10
+ # ====== Question / Flashcard model (T5 small) ======
 
11
  tokenizer = AutoTokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
12
  model = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
13
+
14
+ # ====== Helper function for manual generation ======
15
# ====== Helper function for manual generation ======
def generate_questions(text, max_length=128):
    """Run the T5 question/flashcard model on *text* and return the decoded output.

    Parameters
    ----------
    text : str
        Prompt-prefixed input, e.g. "generate questions: <notes>".
    max_length : int, optional
        Maximum length of the generated sequence (default 128).

    Returns
    -------
    str
        The model's generated text with special tokens stripped.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Inference only: disable autograd bookkeeping to cut memory use and
    # avoid building a gradient graph on every request.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
19
 
20
  # ====== Summarizer Function ======
21
  def summarize_text(text, mode):
 
36
def generate_quiz(text):
    """Produce quiz questions from the user's notes.

    Blank input yields a warning string; otherwise the notes are sent to
    the question-generation model with a quiz prompt.
    """
    if text.strip():
        return generate_questions(f"generate questions: {text}")
    return "⚠️ Please provide some notes!"
 
40
 
41
  # ====== Flashcards Generator ======
42
def generate_flashcards(text):
    """Produce flashcards from the user's notes.

    Blank input yields a warning string; otherwise the notes are sent to
    the generation model with a flashcard prompt.
    """
    if text.strip():
        return generate_questions(f"generate flashcards: {text}")
    return "⚠️ Please provide some notes!"
 
46
 
47
  # ====== Gradio Interface ======
48
  with gr.Blocks() as demo: