Abe committed · commit aa0c79b · parent: c1262f3

random seed

Files changed:
- api.py        +14 -2
- app.py        +14 -3
- inference.py  +18 -1
api.py CHANGED
@@ -25,7 +25,8 @@ async def text_to_image(
     model: str = Form(config.DEFAULT_TEXT2IMG_MODEL),
     negative_prompt: str = Form(config.DEFAULT_NEGATIVE_PROMPT),
     guidance_scale: float = Form(7.5),
-    num_inference_steps: int = Form(50)
+    num_inference_steps: int = Form(50),
+    seed: str = Form(None)
 ):
     """
     Generate an image from a text prompt
@@ -39,13 +40,24 @@ async def text_to_image(
     if not negative_prompt or negative_prompt.strip() == '':
         negative_prompt = config.DEFAULT_NEGATIVE_PROMPT
 
+    # Process seed parameter
+    # We'll pass seed=None to inference.text_to_image if no valid seed is provided
+    # The random seed will be generated in the inference module
+    seed_value = None
+    if seed is not None and seed.strip() != '':
+        try:
+            seed_value = int(seed)
+        except (ValueError, TypeError):
+            # Let the inference module handle invalid seed
+            pass
+
     # Call the inference module
     image = inference.text_to_image(
         prompt=prompt,
         model_name=model,
         negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps
+        num_inference_steps=num_inference_steps,
+        seed=seed_value
     )
 
     # Convert PIL image to bytes
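For reference, a minimal client sketch of the updated endpoint. The route path /text-to-image, host, and port are assumptions (the FastAPI route decorator is not shown in this hunk), and the prompt text is illustrative; seed is sent as a form string and can be omitted or left empty to get a random seed.

# Hypothetical client call; route path and port are assumed, not shown in this diff.
import requests

resp = requests.post(
    "http://localhost:8000/text-to-image",
    data={
        "prompt": "a watercolor fox in a snowy forest",
        "guidance_scale": 7.5,
        "num_inference_steps": 50,
        "seed": "1234",  # optional: omit or send "" to let the backend pick a random seed
    },
)
resp.raise_for_status()
with open("out.png", "wb") as f:
    f.write(resp.content)  # the endpoint converts the PIL image to bytes before returning it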
app.py CHANGED
@@ -11,18 +11,28 @@ inference = DiffusionInference()
 # Initialize the ControlNet pipeline
 controlnet = ControlNetPipeline()
 
-def text_to_image_fn(prompt, model, negative_prompt=None, guidance_scale=7.5, num_inference_steps=50):
+def text_to_image_fn(prompt, model, negative_prompt=None, guidance_scale=7.5, num_inference_steps=50, seed=None):
     try:
         # Model validation - fallback to default if empty
         if not model or model.strip() == '':
             model = config.DEFAULT_TEXT2IMG_MODEL
 
+        # Prepare seed parameter
+        seed_value = None
+        if seed and seed.strip() != '':
+            try:
+                seed_value = int(seed)
+            except (ValueError, TypeError):
+                # Let inference handle invalid seed
+                pass
+
         # Create kwargs dictionary for parameters
         kwargs = {
             "prompt": prompt,
             "model_name": model,
             "guidance_scale": guidance_scale,
-            "num_inference_steps": num_inference_steps
+            "num_inference_steps": num_inference_steps,
+            "seed": seed_value
         }
 
         # Only add negative_prompt if it's not None
@@ -102,6 +112,7 @@ with gr.Blocks(title="Diffusion Models") as app:
             txt2img_model = gr.Textbox(label="Model", placeholder=f"Enter model name", value=config.DEFAULT_TEXT2IMG_MODEL)
             txt2img_guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale")
             txt2img_steps = gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Inference Steps")
+            txt2img_seed = gr.Textbox(label="Seed (Optional)", placeholder="Leave empty for random seed", value="")
             txt2img_button = gr.Button("Generate Image")
 
         with gr.Column():
@@ -110,7 +121,7 @@ with gr.Blocks(title="Diffusion Models") as app:
 
     txt2img_button.click(
         fn=text_to_image_fn,
-        inputs=[txt2img_prompt, txt2img_model, txt2img_negative, txt2img_guidance, txt2img_steps],
+        inputs=[txt2img_prompt, txt2img_model, txt2img_negative, txt2img_guidance, txt2img_steps, txt2img_seed],
         outputs=[txt2img_output, txt2img_error]
     )
 
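The string-to-int seed parsing added here duplicates the logic introduced in api.py. A hypothetical shared helper (not part of this commit) could centralize it; both call sites would then pass its result straight through as seed_value:

# Hypothetical helper, not part of this commit: shared seed parsing for api.py and app.py.
def parse_seed(seed):
    """Return an int seed, or None so the inference module falls back to a random one."""
    if seed is None or str(seed).strip() == "":
        return None
    try:
        return int(seed)
    except (ValueError, TypeError):
        # Invalid input: defer to the inference module's random-seed fallback
        return None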
inference.py CHANGED
@@ -2,6 +2,7 @@ from huggingface_hub import InferenceClient
 from PIL import Image
 import io
 import config
+import random
 
 
 class DiffusionInference:
@@ -15,7 +16,7 @@ class DiffusionInference:
             api_key=self.api_key,
         )
 
-    def text_to_image(self, prompt, model_name=None, negative_prompt=None, **kwargs):
+    def text_to_image(self, prompt, model_name=None, negative_prompt=None, seed=None, **kwargs):
         """
         Generate an image from a text prompt.
 
@@ -36,6 +37,22 @@
             "model": model
         }
 
+        # Handle seed parameter
+        if seed is not None:
+            try:
+                # Convert to integer and add to params
+                params["seed"] = int(seed)
+            except (ValueError, TypeError):
+                # Use a random seed if conversion fails
+                random_seed = random.randint(0, 3999999999)  # stays within the unsigned 32-bit range
+                params["seed"] = random_seed
+                print(f"Warning: Invalid seed value: {seed}, using random seed {random_seed} instead")
+        else:
+            # Generate a random seed when none is provided
+            random_seed = random.randint(0, 3999999999)  # stays within the unsigned 32-bit range
+            params["seed"] = random_seed
+            print(f"Using random seed: {random_seed}")
+
         # Add negative prompt if provided
         if negative_prompt is not None:
             params["negative_prompt"] = negative_prompt
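A short usage sketch of the updated method, assuming DiffusionInference is imported from this repo's inference.py and that the backing inference service honors the "seed" parameter, which is what this commit relies on; prompts are illustrative.

# Usage sketch: reproducibility depends on the backend honoring "seed".
from inference import DiffusionInference

inference = DiffusionInference()

# Explicit seed: repeating the call requests the same image from the backend.
img_a = inference.text_to_image("a lighthouse at dusk", seed=42)
img_b = inference.text_to_image("a lighthouse at dusk", seed=42)

# No seed: the module draws one with random.randint and prints it,
# so the run can be reproduced later from the logged value.
img_c = inference.text_to_image("a lighthouse at dusk")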