alexnasa committed
Commit 4d6da88 · verified · 1 Parent(s): dda6c02

Update gradio_seesr_turbo.py

Files changed (1): gradio_seesr_turbo.py +10 -5
gradio_seesr_turbo.py CHANGED
@@ -47,6 +47,11 @@ snapshot_download(
 )
 
 
+snapshot_download(
+    repo_id="xinyu1205/recognize_anything_model",
+    local_dir="preset/models/"
+)
+
 # Load scheduler, tokenizer and models.
 pretrained_model_path = 'preset/models/sd-turbo'
 seesr_model_path = 'preset/models/seesr'
@@ -56,7 +61,7 @@ text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
 tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
 vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
 # feature_extractor = CLIPImageProcessor.from_pretrained(f"{pretrained_model_path}/feature_extractor")
-unet = UNet2DConditionModel.from_pretrained_orig(seesr_model_path, subfolder="unet")
+unet = UNet2DConditionModel.from_pretrained_orig(pretrained_model_path, seesr_model_path, subfolder="unet")
 controlnet = ControlNetModel.from_pretrained(seesr_model_path, subfolder="controlnet")
 
 # Freeze vae and text_encoder
@@ -181,8 +186,8 @@ with block:
     gr.Markdown(MARKDOWN)
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(source="upload", type="pil")
-            run_button = gr.Button(label="Run")
+            input_image = gr.Image(type="pil")
+            run_button = gr.Button("Run")
             with gr.Accordion("Options", open=True):
                 user_prompt = gr.Textbox(label="User Prompt", value="")
                 positive_prompt = gr.Textbox(label="Positive Prompt", value="clean, high-resolution, 8k, best quality, masterpiece")
@@ -198,7 +203,7 @@ with block:
                 latent_tiled_overlap = gr.Slider(label="Diffusion Tile Overlap", minimum=4, maximum=16, value=4, step=1)
                 scale_factor = gr.Number(label="SR Scale", value=4)
         with gr.Column():
-            result_gallery = gr.Gallery(label="Output", show_label=False, elem_id="gallery").style(grid=2, height="auto")
+            result_gallery = gr.Gallery(label="Output", show_label=False, elem_id="gallery")
 
     inputs = [
         input_image,
@@ -215,5 +220,5 @@ with block:
     ]
     run_button.click(fn=process, inputs=inputs, outputs=[result_gallery])
 
-block.launch()
+block.launch(share=True)
 
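
For context, the Gradio widget edits in this commit appear to track the newer Gradio (4.x) component API, in which gr.Image no longer takes a source= keyword, gr.Button receives its text as the first positional argument rather than label=, and the .style() chaining on components such as gr.Gallery was removed. Below is a minimal, hypothetical sketch of the updated UI wiring under that assumption; the process callback here is a stub standing in for the app's real inference function, and the full inputs list is omitted.

import gradio as gr

# Minimal sketch assuming Gradio 4.x; `process` is a stub standing in for the
# app's real SeeSR inference callback so the snippet runs on its own.
def process(image):
    return [image]

with gr.Blocks() as block:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil")   # no `source=` kwarg in newer Gradio
            run_button = gr.Button("Run")        # button text is positional, not `label=`
        with gr.Column():
            result_gallery = gr.Gallery(label="Output", show_label=False, elem_id="gallery")

    run_button.click(fn=process, inputs=[input_image], outputs=[result_gallery])

block.launch(share=True)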