Spaces:
Running
Running
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,3 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import urllib
|
2 |
import requests
|
3 |
from bs4 import BeautifulSoup
|
@@ -33,7 +38,7 @@ import gradio as gr
|
|
33 |
from lxml.html import fromstring
|
34 |
from huggingface_hub import hf_hub_download
|
35 |
from safetensors.torch import load_file, save_file
|
36 |
-
from diffusers import DiffusionPipeline
|
37 |
from PIL import Image, ImageDraw, ImageFont
|
38 |
from transformers import pipeline, T5ForConditionalGeneration, T5Tokenizer
|
39 |
from refiners.fluxion.utils import manual_seed
|
@@ -502,8 +507,6 @@ function custom(){
|
|
502 |
|
503 |
# torch pipes
|
504 |
|
505 |
-
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
506 |
-
good_vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype).to(device)
|
507 |
image_pipe = DiffusionPipeline.from_pretrained("ostris/Flex.1-alpha", torch_dtype=dtype, vae=taef1).to(device)
|
508 |
image_pipe.enable_model_cpu_offload()
|
509 |
image_pipe.enable_vae_slicing()
|
@@ -621,7 +624,6 @@ def pipe_generate_image(p1,p2):
|
|
621 |
num_images_per_prompt=1,
|
622 |
num_inference_steps=image_steps,
|
623 |
max_sequence_length=seq,
|
624 |
-
good_vae=good_vae,
|
625 |
generator=torch.Generator(device).manual_seed(random.randint(0, MAX_SEED))
|
626 |
).images
|
627 |
log(f'RET pipe_generate')
|
|
|
1 |
+
"""
|
2 |
+
Some modified code included from:
|
3 |
+
- https://github.com/nidhaloff/deep-translator
|
4 |
+
"""
|
5 |
+
|
6 |
import urllib
|
7 |
import requests
|
8 |
from bs4 import BeautifulSoup
|
|
|
38 |
from lxml.html import fromstring
|
39 |
from huggingface_hub import hf_hub_download
|
40 |
from safetensors.torch import load_file, save_file
|
41 |
+
from diffusers import DiffusionPipeline
|
42 |
from PIL import Image, ImageDraw, ImageFont
|
43 |
from transformers import pipeline, T5ForConditionalGeneration, T5Tokenizer
|
44 |
from refiners.fluxion.utils import manual_seed
|
|
|
507 |
|
508 |
# torch pipes
|
509 |
|
|
|
|
|
510 |
image_pipe = DiffusionPipeline.from_pretrained("ostris/Flex.1-alpha", torch_dtype=dtype, vae=taef1).to(device)
|
511 |
image_pipe.enable_model_cpu_offload()
|
512 |
image_pipe.enable_vae_slicing()
|
|
|
624 |
num_images_per_prompt=1,
|
625 |
num_inference_steps=image_steps,
|
626 |
max_sequence_length=seq,
|
|
|
627 |
generator=torch.Generator(device).manual_seed(random.randint(0, MAX_SEED))
|
628 |
).images
|
629 |
log(f'RET pipe_generate')
|