ghostai1 committed
Commit 938ef03 · verified · 1 Parent(s): 374ad82

Update app.py

Files changed (1):
  app.py +13 -5
app.py CHANGED
@@ -1,19 +1,28 @@
  # 🔄 Text Paraphraser | CPU-only HF Space

  import gradio as gr
- from transformers import pipeline
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForSeq2SeqLM,
+     pipeline,
+ )
+
+ # 1️⃣ Load model + slow tokenizer explicitly
+ MODEL_ID = "Vamsi/T5_Paraphrase_Paws"
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
+ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

- # Load the paraphrase pipeline once at startup
+ # 2️⃣ Create paraphrase pipeline with our slow tokenizer
  paraphraser = pipeline(
      "text2text-generation",
-     model="Vamsi/T5_Paraphrase_Paws",
+     model=model,
+     tokenizer=tokenizer,
      device=-1,  # CPU
  )

  def paraphrase(text: str, num_variations: int):
      if not text.strip():
          return []
-     # T5 paraphrase prompt
      prompt = "paraphrase: " + text.strip()
      outputs = paraphraser(
          prompt,
@@ -23,7 +32,6 @@ def paraphrase(text: str, num_variations: int):
          top_k=120,
          top_p=0.95
      )
-     # Extract generated_text
      return [out["generated_text"].strip() for out in outputs]

  with gr.Blocks(title="🔄 Text Paraphraser") as demo:
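
For reference, a minimal standalone sketch of the pattern this commit adopts: load the slow tokenizer explicitly with use_fast=False and pass both the model and tokenizer objects to the pipeline. The generation kwargs between the two hunks (old lines 20–22) are not shown in this diff, so the do_sample and num_return_sequences values below are illustrative assumptions, not the Space's actual settings.

    # Sketch only: mirrors the committed setup; sampling kwargs are assumed.
    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

    MODEL_ID = "Vamsi/T5_Paraphrase_Paws"
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)  # slow tokenizer
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

    paraphraser = pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        device=-1,  # CPU
    )

    outputs = paraphraser(
        "paraphrase: The quick brown fox jumps over the lazy dog.",
        do_sample=True,          # assumed: sampling must be enabled for top_k/top_p to apply
        top_k=120,
        top_p=0.95,
        num_return_sequences=3,  # assumed: one candidate per requested variation
    )
    print([out["generated_text"].strip() for out in outputs])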