Himanshu806 committed on
Commit
1ab58ef
·
verified ·
1 Parent(s): cef5e11

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -24
app.py CHANGED
@@ -22,22 +22,22 @@ MAX_IMAGE_SIZE = 2048
22
 
23
  pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
24
 
25
- with open("lora_models.json", "r") as f:
26
- lora_models = json.load(f)
27
 
28
- def download_model(model_name, model_path):
29
- print(f"Downloading model: {model_name} from {model_path}")
30
- try:
31
- pipe.load_lora_weights(model_path)
32
- print(f"Successfully downloaded model: {model_name}")
33
- except Exception as e:
34
- print(f"Failed to download model: {model_name}. Error: {e}")
35
 
36
- # Iterate through the models and download each one
37
- for model_name, model_path in lora_models.items():
38
- download_model(model_name, model_path)
39
 
40
- lora_models["None"] = None
41
 
42
  # def calculate_optimal_dimensions(image: Image.Image):
43
  # # Extract the original dimensions
@@ -77,13 +77,14 @@ lora_models["None"] = None
77
  # return width, height
78
 
79
  @spaces.GPU(durations=300)
80
- def infer(edit_images, prompt, width, height, lora_model, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
 
81
  # pipe.enable_xformers_memory_efficient_attention()
82
  gr.Info("Infering")
83
 
84
- if lora_model != "None":
85
- pipe.load_lora_weights(lora_models[lora_model])
86
- pipe.enable_lora()
87
 
88
  gr.Info("starting checks")
89
 
@@ -94,7 +95,6 @@ def infer(edit_images, prompt, width, height, lora_model, strength, seed=42, ran
94
  gr.Info("Please upload an image.")
95
  return None, None
96
 
97
-
98
  # width, height = calculate_optimal_dimensions(image)
99
  if randomize_seed:
100
  seed = random.randint(0, MAX_SEED)
@@ -211,11 +211,11 @@ with gr.Blocks(css=css) as demo:
211
  container=False,
212
  )
213
 
214
- lora_model = gr.Dropdown(
215
- label="Select LoRA Model",
216
- choices=list(lora_models.keys()),
217
- value="None",
218
- )
219
 
220
  run_button = gr.Button("Run")
221
 
@@ -282,7 +282,7 @@ with gr.Blocks(css=css) as demo:
282
  gr.on(
283
  triggers=[run_button.click, prompt.submit],
284
  fn = infer,
285
- inputs = [edit_image, prompt, width, height, lora_model, strength, seed, randomize_seed, guidance_scale, num_inference_steps],
286
  outputs = [result, seed]
287
  )
288
 
 
22
 
23
  pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
24
 
25
+ # with open("lora_models.json", "r") as f:
26
+ # lora_models = json.load(f)
27
 
28
+ # def download_model(model_name, model_path):
29
+ # print(f"Downloading model: {model_name} from {model_path}")
30
+ # try:
31
+ # pipe.load_lora_weights(model_path)
32
+ # print(f"Successfully downloaded model: {model_name}")
33
+ # except Exception as e:
34
+ # print(f"Failed to download model: {model_name}. Error: {e}")
35
 
36
+ # # Iterate through the models and download each one
37
+ # for model_name, model_path in lora_models.items():
38
+ # download_model(model_name, model_path)
39
 
40
+ # lora_models["None"] = None
41
 
42
  # def calculate_optimal_dimensions(image: Image.Image):
43
  # # Extract the original dimensions
 
77
  # return width, height
78
 
79
  @spaces.GPU(durations=300)
80
+ # def infer(edit_images, prompt, width, height, lora_model, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
81
+ def infer(edit_images, prompt, width, height, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
82
  # pipe.enable_xformers_memory_efficient_attention()
83
  gr.Info("Infering")
84
 
85
+ # if lora_model != "None":
86
+ # pipe.load_lora_weights(lora_models[lora_model])
87
+ # pipe.enable_lora()
88
 
89
  gr.Info("starting checks")
90
 
 
95
  gr.Info("Please upload an image.")
96
  return None, None
97
 
 
98
  # width, height = calculate_optimal_dimensions(image)
99
  if randomize_seed:
100
  seed = random.randint(0, MAX_SEED)
 
211
  container=False,
212
  )
213
 
214
+ # lora_model = gr.Dropdown(
215
+ # label="Select LoRA Model",
216
+ # choices=list(lora_models.keys()),
217
+ # value="None",
218
+ # )
219
 
220
  run_button = gr.Button("Run")
221
 
 
282
  gr.on(
283
  triggers=[run_button.click, prompt.submit],
284
  fn = infer,
285
+ inputs = [edit_image, prompt, width, height, strength, seed, randomize_seed, guidance_scale, num_inference_steps],
286
  outputs = [result, seed]
287
  )
288