lionelgarnier commited on
Commit
85a6d42
·
1 Parent(s): 470ecaf

update examples

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -27,7 +27,7 @@ def get_image_gen_pipeline():
27
  # Load fast tokenizer for the image pipeline
28
  tokenizer = AutoTokenizer.from_pretrained(
29
  "black-forest-labs/FLUX.1-schnell",
30
- use_fast=True
31
  )
32
  _image_gen_pipeline = DiffusionPipeline.from_pretrained(
33
  "black-forest-labs/FLUX.1-schnell",
@@ -47,12 +47,12 @@ def get_text_gen_pipeline():
47
  device = "cuda" if torch.cuda.is_available() else "cpu"
48
  tokenizer = AutoTokenizer.from_pretrained(
49
  "mistralai/Mistral-7B-Instruct-v0.3",
50
- use_fast=True # Force l'utilisation du tokenizer rapide
51
  )
52
  _text_gen_pipeline = pipeline(
53
  "text-generation",
54
  model="mistralai/Mistral-7B-Instruct-v0.3",
55
- tokenizer=tokenizer, # Utilise le tokenizer rapide
56
  max_new_tokens=2048,
57
  device=device,
58
  )
@@ -306,7 +306,8 @@ def create_interface():
306
  "medieval flip flops",
307
  "cat shaped cake mold"
308
  ],
309
- inputs=[prompt],
 
310
  outputs=[generated_image, seed],
311
  cache_examples=True,
312
  cache_mode='lazy'
 
27
  # Load fast tokenizer for the image pipeline
28
  tokenizer = AutoTokenizer.from_pretrained(
29
  "black-forest-labs/FLUX.1-schnell",
30
+ use_fast=False
31
  )
32
  _image_gen_pipeline = DiffusionPipeline.from_pretrained(
33
  "black-forest-labs/FLUX.1-schnell",
 
47
  device = "cuda" if torch.cuda.is_available() else "cpu"
48
  tokenizer = AutoTokenizer.from_pretrained(
49
  "mistralai/Mistral-7B-Instruct-v0.3",
50
+ use_fast=True
51
  )
52
  _text_gen_pipeline = pipeline(
53
  "text-generation",
54
  model="mistralai/Mistral-7B-Instruct-v0.3",
55
+ tokenizer=tokenizer,
56
  max_new_tokens=2048,
57
  device=device,
58
  )
 
306
  "medieval flip flops",
307
  "cat shaped cake mold"
308
  ],
309
+ inputs=prompt,
310
+ fn=infer,
311
  outputs=[generated_image, seed],
312
  cache_examples=True,
313
  cache_mode='lazy'