lionelgarnier committed on
Commit
350ac7c
·
1 Parent(s): 276236e

update tokenizer

Browse files
Files changed (1) hide show
  1. app.py +16 -23
app.py CHANGED
@@ -37,22 +37,22 @@ def get_image_gen_pipeline():
37
def get_text_gen_pipeline():
    """Lazily build and cache the Mistral-7B-Instruct text-generation pipeline.

    Returns:
        The cached transformers pipeline, or None when loading fails.
    """
    global _text_gen_pipeline
    # Already initialized: hand back the cached instance straight away.
    if _text_gen_pipeline is not None:
        return _text_gen_pipeline
    try:
        target_device = "cuda" if torch.cuda.is_available() else "cpu"
        # Fast (Rust-backed) tokenizer for the instruct model.
        fast_tokenizer = AutoTokenizer.from_pretrained(
            "mistralai/Mistral-7B-Instruct-v0.3",
            use_fast=True,
        )
        _text_gen_pipeline = pipeline(
            "text-generation",
            model="mistralai/Mistral-7B-Instruct-v0.3",
            tokenizer=fast_tokenizer,
            max_new_tokens=2048,
            device=target_device,
        )
    except Exception as e:
        print(f"Error loading text generation model: {e}")
        return None
    return _text_gen_pipeline
57
 
58
  @spaces.GPU()
@@ -146,22 +146,15 @@ def preload_models():
146
  try:
147
  # Préchargement du modèle de génération de texte
148
  device = "cuda" if torch.cuda.is_available() else "cpu"
149
- tokenizer = AutoTokenizer.from_pretrained(
150
- "mistralai/Mistral-7B-Instruct-v0.3",
151
- use_fast=True
152
- )
153
- global _text_gen_pipeline
154
  _text_gen_pipeline = pipeline(
155
  "text-generation",
156
  model="mistralai/Mistral-7B-Instruct-v0.3",
157
- tokenizer=tokenizer,
158
  max_new_tokens=2048,
159
  device=device,
160
  )
161
 
162
  # Préchargement du modèle de génération d'images
163
  dtype = torch.bfloat16
164
- global _image_gen_pipeline
165
  _image_gen_pipeline = DiffusionPipeline.from_pretrained(
166
  "black-forest-labs/FLUX.1-schnell",
167
  torch_dtype=dtype
 
37
def get_text_gen_pipeline():
    """Return the module-level Mistral-7B-Instruct text-generation pipeline.

    Loads the model lazily on first call, selecting CUDA when available
    and falling back to CPU otherwise. The result is cached in the
    module global ``_text_gen_pipeline``.

    Returns:
        The transformers text-generation pipeline, or None if loading
        failed (the error is printed and callers must handle None).
    """
    global _text_gen_pipeline
    if _text_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            # use_fast=True forces the Rust-backed fast tokenizer.
            tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.3",
                use_fast=True,
            )
            _text_gen_pipeline = pipeline(
                "text-generation",
                model="mistralai/Mistral-7B-Instruct-v0.3",
                tokenizer=tokenizer,  # reuse the fast tokenizer built above
                max_new_tokens=2048,
                device=device,
            )
        except Exception as e:
            # Best-effort loader: swallow the failure, report it, and let
            # the caller decide what to do with a None pipeline.
            print(f"Error loading text generation model: {e}")
            return None
    return _text_gen_pipeline
57
 
58
  @spaces.GPU()
 
146
  try:
147
  # Préchargement du modèle de génération de texte
148
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
 
 
 
 
149
  _text_gen_pipeline = pipeline(
150
  "text-generation",
151
  model="mistralai/Mistral-7B-Instruct-v0.3",
 
152
  max_new_tokens=2048,
153
  device=device,
154
  )
155
 
156
  # Préchargement du modèle de génération d'images
157
  dtype = torch.bfloat16
 
158
  _image_gen_pipeline = DiffusionPipeline.from_pretrained(
159
  "black-forest-labs/FLUX.1-schnell",
160
  torch_dtype=dtype