Shivangi2005 committed
Commit 3615155 · verified · 1 Parent(s): f82f779

Update app.py

Files changed (1):
  app.py +5 -4
app.py CHANGED
@@ -1,13 +1,16 @@
  # app.py
  import gradio as gr
- from transformers import pipeline
+ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

  # ====== Load AI models ======
  # Summarization model (BART)
  summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

  # Question generation model (T5 small, CPU-friendly)
- question_generator = pipeline("text2text-generation", model="valhalla/t5-small-qg-hl")
+ # Manually load tokenizer to avoid SentencePiece fast tokenizer issues
+ tokenizer = AutoTokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
+ model = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
+ question_generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

  # ====== Summarizer Function ======
  def summarize_text(text, mode):
@@ -28,7 +31,6 @@ def summarize_text(text, mode):
  def generate_quiz(text):
      if not text.strip():
          return "⚠️ Please provide some notes!"
-     # Generate 5 questions
      output = question_generator(f"generate questions: {text}", max_length=256)
      return output[0]['generated_text']

@@ -36,7 +38,6 @@ def generate_quiz(text):
  def generate_flashcards(text):
      if not text.strip():
          return "⚠️ Please provide some notes!"
-     # Generate Q&A style flashcards
      output = question_generator(f"generate flashcards: {text}", max_length=256)
      return output[0]['generated_text']

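For context, a minimal standalone sketch of the loading path this commit switches to, and of the call shape used by generate_quiz(); it assumes transformers and sentencepiece are installed, and the sample notes string is made up for illustration:

# Minimal sketch (not part of the commit) of the new question-generator setup.
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

# use_fast=False loads the slow SentencePiece tokenizer, sidestepping the
# fast-tokenizer conversion issue the commit comment mentions.
tokenizer = AutoTokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
model = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
question_generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# Hypothetical input, only to show how the app calls the pipeline.
notes = "Photosynthesis converts light energy into chemical energy in plant cells."
print(question_generator(f"generate questions: {notes}", max_length=256)[0]["generated_text"])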