Update app.py
app.py CHANGED
@@ -62,9 +62,9 @@ def enable_lora(lora_scale, lora_in, lora_add):
     if not lora_in and not lora_add:
         return
     else:
-        if
-
-        url = f'https://huggingface.co/{
+        if lora_add:
+            lora_in = lora_add
+        url = f'https://huggingface.co/{lora_in}/tree/main'
         lora_name = scrape_lora_link(url)
         pipe.load_lora_weights(lora_add, weight_name=lora_name)
         pipe.fuse_lora(lora_scale=lora_scale)
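Note: in the new branch, a repo id typed into lora_add replaces the dropdown value, the repo's /tree/main listing URL is built from lora_in, and scrape_lora_link picks the weight file name from it. That helper is not part of this diff; below is only a rough sketch of what it could look like, assuming it pulls the first .safetensors name from the listing page with requests and re:

import re
import requests

def scrape_lora_link(url: str) -> str:
    # Hypothetical sketch (the real implementation is not shown in this diff):
    # fetch the repo's /tree/main page and return the first *.safetensors
    # file name found in the HTML.
    html = requests.get(url, timeout=30).text
    match = re.search(r'[\w.\-]+\.safetensors', html)
    if match is None:
        raise ValueError(f'no .safetensors file found at {url}')
    return match.group(0)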
@@ -76,13 +76,13 @@ def generate_image(
     scales:float=3.5,
     steps:int=24,
     seed:int=-1,
-    nums:int=1
-    progress=gr.Progress(track_tqdm=True)):
+    nums:int=1):
 
     if seed == -1:
         seed = random.randint(0, MAX_SEED)
     seed = int(seed)
     print(f'prompt:{prompt}')
+    print(type(width))
 
     text = str(translator.translate(prompt, 'English'))
 
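Note: nums becomes the last parameter of generate_image because the progress tracker moves to gen (next hunk), and print(type(width)) is a debug print for the width value arriving from the UI. The diff does not show where the seed is consumed; a typical diffusers call built from these context lines would look roughly like the sketch below (the pipe arguments are assumptions, not code from app.py):

import random
import torch

MAX_SEED = 2**32 - 1  # assumed value; the real constant is defined elsewhere in app.py

def pick_seed(seed: int = -1) -> int:
    # seed == -1 means "no seed chosen": draw one at random, then cast to int
    # so it can feed a torch.Generator.
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    return int(seed)

# Assumed usage further down in generate_image:
# generator = torch.Generator(device='cuda').manual_seed(pick_seed(seed))
# images = pipe(prompt=text, width=width, height=height, guidance_scale=scales,
#               num_inference_steps=steps, num_images_per_prompt=nums,
#               generator=generator).images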
@@ -114,11 +114,13 @@ def gen(
     nums:int=1,
     lora_scale:float=1.0,
     lora_in:str="",
-    lora_add:str=""
+    lora_add:str="",
+    progress=gr.Progress(track_tqdm=True)
 ):
     enable_lora(lora_scale, lora_in, lora_add)
     pipe.to(device="cuda", dtype=torch.bfloat16)
-    generate_image(prompt,width,height,scales,steps,seed,nums)
+    return generate_image(prompt,width,height,scales,steps,seed,nums)
+
 
 
 examples_bak = [
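Note: progress=gr.Progress(track_tqdm=True) now sits on gen, the function Gradio actually calls, and gen returns the result of generate_image so the output reaches the UI components. A gr.Progress default parameter is the standard Gradio pattern: Gradio injects the tracker at call time, and track_tqdm=True mirrors tqdm loops (such as the diffusers denoising steps) into the web UI. A minimal, self-contained illustration of the pattern (names are illustrative, not taken from app.py):

import time
import gradio as gr

def slow_task(n_steps: float = 5, progress=gr.Progress(track_tqdm=True)):
    # Gradio fills in `progress`; progress.tqdm() both iterates and reports.
    for _ in progress.tqdm(range(int(n_steps)), desc="working"):
        time.sleep(0.1)
    # Returning the value is what delivers it to the output component, which is
    # why the hunk changes generate_image(...) to return generate_image(...).
    return f"finished {int(n_steps)} steps"

with gr.Blocks() as demo:
    steps = gr.Slider(1, 20, value=5, step=1, label="Steps")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(slow_task, inputs=steps, outputs=out)

# demo.launch()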
@@ -209,10 +211,9 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
             value=1.0,
         )
         lora_in = gr.Dropdown(
-            choices=["Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration", "Shakker-Labs/AWPortrait-FL"],
+            choices=["Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration", "Shakker-Labs/AWPortrait-FL",""],
             label="LoRA Model",
             info="Load the LoRA",
-            value="",
         )
         lora_add = gr.Textbox(
             label="Add Flux LoRA",
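Note: the removed value="" default was not among the Dropdown's choices, which recent Gradio versions warn about or reject; adding "" as an explicit choice keeps a "no preset LoRA" option selectable. Roughly, the component block now reads as below (the placeholder text and the wiring comment are assumptions, not copied from app.py):

import gradio as gr

with gr.Blocks() as demo:
    lora_in = gr.Dropdown(
        choices=[
            "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
            "Shakker-Labs/AWPortrait-FL",
            "",  # explicit empty choice = run without a preset LoRA
        ],
        label="LoRA Model",
        info="Load the LoRA",
    )
    lora_add = gr.Textbox(
        label="Add Flux LoRA",
        placeholder="user/repo-id",  # assumed placeholder
    )
    # In the real app these components are passed, together with the prompt and
    # sampling settings, as inputs to the generate button's click handler,
    # which forwards them to enable_lora(lora_scale, lora_in, lora_add).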