Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -114,25 +114,25 @@ snapshot_download(
 
 
 snapshot_download(
-    repo_id="stabilityai/
-    local_dir="preset/models/
+    repo_id="stabilityai/sd-turbo",
+    local_dir="preset/models/sd-turbo"
 )
 
+
 snapshot_download(
     repo_id="xinyu1205/recognize_anything_model",
     local_dir="preset/models/"
 )
 
-
 # Load scheduler, tokenizer and models.
-pretrained_model_path = 'preset/models/
+pretrained_model_path = 'preset/models/sd-turbo'
 seesr_model_path = 'preset/models/seesr'
 
 scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler")
 text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
 tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
 vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
-feature_extractor = CLIPImageProcessor.from_pretrained(f"{pretrained_model_path}/feature_extractor")
+# feature_extractor = CLIPImageProcessor.from_pretrained(f"{pretrained_model_path}/feature_extractor")
 unet = UNet2DConditionModel.from_pretrained(seesr_model_path, subfolder="unet")
 controlnet = ControlNetModel.from_pretrained(seesr_model_path, subfolder="controlnet")
 
@@ -192,9 +192,9 @@ def magnify(
     user_prompt = "",
     positive_prompt = "clean, high-resolution, 8k, best quality, masterpiece",
     negative_prompt = "dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
-    num_inference_steps = 
+    num_inference_steps = 2,
     scale_factor = 4,
-    cfg_scale = 
+    cfg_scale = 1,
     seed = 123,
     latent_tiled_size = 320,
    latent_tiled_overlap = 4,
@@ -302,8 +302,8 @@ with gr.Blocks(css=css, theme=theme) as demo:
         label="Negative Prompt",
         value="dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"
     )
-    cfg_scale = gr.Slider(label="Classifier Free Guidance Scale (Set to 1.0 in sd-turbo)", minimum=1, maximum=10, value=
-    num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=100, value=
+    cfg_scale = gr.Slider(label="Classifier Free Guidance Scale (Set to 1.0 in sd-turbo)", minimum=1, maximum=10, value=1, step=0)
+    num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=100, value=2, step=1)
     seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=231)
     sample_times = gr.Slider(label="Sample Times", minimum=1, maximum=10, step=1, value=1)
     latent_tiled_size = gr.Slider(label="Diffusion Tile Size", minimum=128, maximum=480, value=320, step=1)
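
The first hunk swaps the Space's base diffusion model to stabilityai/sd-turbo, pre-fetching it into the preset directory that the subsequent from_pretrained() calls read from (the feature_extractor load is commented out rather than deleted, presumably because the new base repo does not ship that subfolder). A minimal sketch of the pre-fetch step, assuming only the huggingface_hub API already used in app.py, with paths copied from the commit:

from huggingface_hub import snapshot_download

# Fetch the sd-turbo base weights once into the local preset directory;
# scheduler, tokenizer, text_encoder and vae are then loaded from it
# via from_pretrained(pretrained_model_path, subfolder=...).
snapshot_download(
    repo_id="stabilityai/sd-turbo",
    local_dir="preset/models/sd-turbo",
)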
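The remaining hunks align the inference defaults with sd-turbo's distillation regime: 2 sampling steps and a guidance scale of 1. In diffusers-style pipelines a guidance scale of 1 reduces the classifier-free-guidance blend to the conditional prediction alone, which is what the slider label "Set to 1.0 in sd-turbo" refers to. A minimal sketch of that blend, assuming the standard CFG formulation (apply_cfg is an illustrative helper, not a function in app.py):

import torch

def apply_cfg(noise_uncond: torch.Tensor,
              noise_text: torch.Tensor,
              guidance_scale: float) -> torch.Tensor:
    # Standard classifier-free guidance blend. At guidance_scale == 1
    # this returns exactly the conditional prediction, so guidance is
    # effectively disabled -- the setting sd-turbo was distilled for.
    if guidance_scale <= 1.0:
        return noise_text
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

One detail worth flagging: the new cfg_scale slider passes step=0 while the neighbouring sliders use step=1, which looks like a typo rather than an intentional setting.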