multimodalart (HF Staff) committed
Commit d3c4eaa · verified · 1 Parent(s): a1d3d88

Update app.py

Files changed (1)
  1. app.py +12 -22
app.py CHANGED

@@ -1,7 +1,7 @@
 import gradio as gr
 import numpy as np
 import random
-import spaces #[uncomment to use ZeroGPU]
+import spaces
 from diffusers import ChromaPipeline
 import torch
 
@@ -19,19 +19,14 @@ pipe = pipe.to(device)
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-@spaces.GPU(duration=75) #[uncomment to use ZeroGPU]
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, lora_model, progress=gr.Progress(track_tqdm=True)):
+@spaces.GPU()
+def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator(device).manual_seed(seed)
 
-    # Handle LoRA if needed (ChromaPipeline may not support LoRA by default)
-    # Uncomment and adapt if LoRA support is available
-    # if lora_model:
-    #     pipe.load_lora_weights(lora_model)
-
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
@@ -65,8 +60,8 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
-        # Chroma1-HD - Anaglyph 3D Image Generation
-        Generate stereoscopic 3D images optimized for red-cyan anaglyph glasses
+        # Chroma1-HD
+        [Chroma1-HD](https://huggingface.co/lodestones/Chroma1-HD) is an 8.9B parameter text-to-image foundational model based on FLUX.1-schnell
         """)
 
         with gr.Row():
@@ -88,19 +83,14 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-
-            lora_model = gr.Textbox(
-                label="LoRA model id (if supported)",
-                placeholder="Leave empty if not using LoRA",
-                visible=False # Hidden by default as ChromaPipeline may not support LoRA
-            )
+
 
             guidance_scale = gr.Slider(
                 label="Guidance Scale",
                 minimum=1.0,
                 maximum=10.0,
                 step=0.1,
-                value=3.0, # Default from your example
+                value=3.0,
             )
 
             seed = gr.Slider(
@@ -108,7 +98,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=0,
                 maximum=MAX_SEED,
                 step=1,
-                value=433, # Using the seed from your example
+                value=433,
             )
 
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
@@ -120,7 +110,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=32,
-                value=1024, # Default resolution
+                value=1024,
             )
 
             height = gr.Slider(
@@ -128,7 +118,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=32,
-                value=1024, # Default resolution
+                value=1024,
             )
 
             num_inference_steps = gr.Slider(
@@ -136,7 +126,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=1,
                 maximum=100,
                 step=1,
-                value=40, # Default from your example
+                value=40,
             )
 
         gr.Examples(
@@ -147,7 +137,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit, negative_prompt.submit],
        fn=infer,
-        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, lora_model],
+        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed]
    )
 
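
For reference, here is a minimal sketch of how the updated inference path fits together after this commit. The decorator, function signature, seed handling, and generator setup are taken from the diff above; the model checkpoint id (lodestones/Chroma1-HD), the torch_dtype, the pipe(...) keyword arguments beyond prompt and negative_prompt, and the return statement are assumptions filled in from the surrounding context, since the diff truncates the pipeline call.

import random

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import ChromaPipeline

# Assumed setup: the checkpoint id and dtype are not shown in this diff.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = ChromaPipeline.from_pretrained("lodestones/Chroma1-HD", torch_dtype=torch.bfloat16)
pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max


@spaces.GPU()  # allocate a ZeroGPU device for the duration of each call
def infer(prompt, negative_prompt, seed, randomize_seed, width, height,
          guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device).manual_seed(seed)

    # Assumed: the truncated pipe(...) call forwards the remaining UI values
    # as standard diffusers keyword arguments.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]

    # Assumed return value, matching outputs=[result, seed] in the gr.on() wiring.
    return image, seed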