Update app.py
Browse files
app.py
CHANGED
@@ -462,6 +462,25 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
|
|
462 |
final_image = img # Update final_image with the current image
|
463 |
return final_image
|
464 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
465 |
def run_lora(prompt, cfg_scale, steps, selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4, randomize_seed, seed, width, height, loras_state, image_input=None, progress=gr.Progress(track_tqdm=True)):
|
466 |
print("run_lora function called.") # Debugging statement
|
467 |
print(f"Inputs received - Prompt: {prompt}, CFG Scale: {cfg_scale}, Steps: {steps}, Seed: {seed}, Width: {width}, Height: {height}") # Debugging statement
|
|
|
462 |
final_image = img # Update final_image with the current image
|
463 |
return final_image
|
464 |
|
465 |
+
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
    """Run the image-to-image diffusion pipeline on an input image.

    Args:
        prompt_mash: Combined text prompt (base prompt plus LoRA trigger words).
        image_input_path: Path (or URL) of the source image to transform.
        image_strength: Denoising strength — how far the output may drift from the input image.
        steps: Number of inference steps.
        cfg_scale: Classifier-free guidance scale.
        width: Output image width in pixels.
        height: Output image height in pixels.
        seed: RNG seed for reproducible generation.

    Returns:
        The generated PIL image (first image of the pipeline output).
    """
    # Make sure the img2img pipeline lives on the GPU before running.
    pipe_i2i.to("cuda")

    # Seeded CUDA generator so the same seed reproduces the same output.
    rng = torch.Generator(device="cuda").manual_seed(seed)

    # Load the source image from its path/URL into a PIL image.
    init_image = load_image(image_input_path)

    output = pipe_i2i(
        prompt=prompt_mash,
        image=init_image,
        strength=image_strength,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=rng,
        joint_attention_kwargs={"scale": 1.0},
        output_type="pil",
    )
    return output.images[0]
|
482 |
+
|
483 |
+
@spaces.GPU(duration=75)
|
484 |
def run_lora(prompt, cfg_scale, steps, selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4, randomize_seed, seed, width, height, loras_state, image_input=None, progress=gr.Progress(track_tqdm=True)):
|
485 |
print("run_lora function called.") # Debugging statement
|
486 |
print(f"Inputs received - Prompt: {prompt}, CFG Scale: {cfg_scale}, Steps: {steps}, Seed: {seed}, Width: {width}, Height: {height}") # Debugging statement
|