Update app.py
app.py
CHANGED
|
@@ -50,41 +50,37 @@ image_examples = [
|
|
| 50 |
|
| 51 |
]
|
| 52 |
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
gr.Info(str(f"Inject LoRA: {lora_path}"))
|
| 85 |
-
pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors")
|
| 86 |
-
gr.Info(str(f"Model loading: {int((100 / 100) * 100)}%"))
|
| 87 |
-
return pipe
|
| 88 |
@spaces.GPU
|
| 89 |
def set_seed(seed):
|
| 90 |
torch.manual_seed(seed)
|
|
@@ -277,9 +273,7 @@ with gr.Blocks(
     ),
     title="Omnieraser"
 ) as demo:
-
-    lora_path = 'theSure/Omnieraser'
-    #a = load_model(base_model_path=base_model_path, lora_path=lora_path)
+

     ddim_steps = gr.Slider(visible=False, value=28)
     scale = gr.Slider(visible=False, value=3.5)
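Note on the first hunk: the core of the new code is a zero-initialized expansion of the transformer's input projection (x_embedder) from in_channels to in_channels*4, so the extra conditioning channels start as a no-op while the pretrained mapping on the original channels is preserved. A minimal runnable sketch of the same trick on a toy nn.Linear follows; the 64/128 sizes are illustrative stand-ins, not the real FLUX dimensions:

import torch

# Stand-in for the pretrained x_embedder: 64 input channels, 128 output.
old = torch.nn.Linear(64, 128)

# Widen the input 4x, as the diff does, keeping the output size unchanged.
new = torch.nn.Linear(64 * 4, 128, bias=old.bias is not None)
with torch.no_grad():
    new.weight.zero_()                    # extra input columns start at zero
    new.weight[:, :64].copy_(old.weight)  # original columns keep the pretrained weights
    if old.bias is not None:
        new.bias.copy_(old.bias)

# Zero-padding the new channels reproduces the old layer exactly, so the
# expanded model starts out behaving like the pretrained one.
x = torch.randn(2, 64)
x_pad = torch.cat([x, torch.zeros(2, 64 * 3)], dim=-1)
assert torch.allclose(old(x), new(x_pad))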
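The commit is also structural: judging by the removed "return pipe" and the dropped, already commented-out load_model call in the second hunk, model loading used to live inside a load_model() helper and now runs once at module scope when the Space boots. A hedged sketch of that pattern, with a toy class standing in for FluxControlRemovalPipeline (all names here are illustrative, not from the commit):

import torch

class ToyPipeline:
    """Stand-in for the real pipeline; only the object lifetime matters here."""
    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2

# Built once at import time, as app.py now does, rather than inside a helper
# that each event handler would have to call first.
pipe = ToyPipeline()

def handler(x: torch.Tensor) -> torch.Tensor:
    # In app.py this role is played by the @spaces.GPU-decorated functions,
    # which close over the module-level pipe.
    return pipe(x)

print(handler(torch.ones(2)))  # tensor([2., 2.])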