nguyenlam0306 commited on
Commit
1c20d3f
·
1 Parent(s): 016478f

Loi tokenizer

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig
3
  from diffusers import StableDiffusionPipeline
4
  import torch
5
  import io
@@ -7,12 +7,12 @@ from PIL import Image
7
 
8
  # Load the summarization model (lacos03/bart-base-finetuned-xsum) manually
9
  model_name = "lacos03/bart-base-finetuned-xsum"
10
- tokenizer = AutoTokenizer.from_pretrained(model_name)
11
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
12
 
13
  # Create a custom GenerationConfig to fix early_stopping issue
14
  generation_config = GenerationConfig.from_pretrained(model_name)
15
- generation_config.early_stopping = True # Set to True to fix the error
16
 
17
  # Initialize the summarization pipeline
18
  summarizer = pipeline(
 
1
  import gradio as gr
2
+ from transformers import pipeline, AutoModelForSeq2SeqLM, BartTokenizer, GenerationConfig
3
  from diffusers import StableDiffusionPipeline
4
  import torch
5
  import io
 
7
 
8
  # Load the summarization model (lacos03/bart-base-finetuned-xsum) manually
9
  model_name = "lacos03/bart-base-finetuned-xsum"
10
+ tokenizer = BartTokenizer.from_pretrained(model_name, use_fast=False) # Use slow tokenizer
11
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
12
 
13
  # Create a custom GenerationConfig to fix early_stopping issue
14
  generation_config = GenerationConfig.from_pretrained(model_name)
15
+ generation_config.early_stopping = True # Fix early_stopping error
16
 
17
  # Initialize the summarization pipeline
18
  summarizer = pipeline(