import gradio as gr
import torch
import numpy as np
from diffusers import StableDiffusionXLImg2ImgPipeline
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
from PIL import Image, ImageOps
# Device configuration: prefer CUDA with fp16, fall back to fp32 on CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float32 if device == "cpu" else torch.float16
print("Loading SDXL Img2Img model...")
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch_dtype,
    use_safetensors=True
).to(device)
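# Optional memory savers (an assumption, not part of the original script; both are
# standard diffusers pipeline methods): uncomment on VRAM-constrained GPUs.
# pipe.enable_attention_slicing()
# pipe.enable_vae_slicing()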
print("Loading LoRA weights...")
pipe.load_lora_weights(
    "KappaNeuro/bas-relief",
    weight_name="BAS-RELIEF.safetensors",
    adapter_name="bas_relief"
)
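# Optionally fuse the LoRA into the base weights for slightly faster inference
# (fusing is an assumption, not in the original; fuse_lora is a standard diffusers
# method, and fusing makes the adapter non-switchable):
# pipe.fuse_lora()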
print("Loading depth estimation model...")
feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(device)
def processar_profundidade(depth_arr: np.ndarray) -> Image.Image:
    # Normalize depth to [0, 1] (the epsilon guards against a flat map), scale to
    # 8-bit grayscale, and stretch contrast for a more readable preview
    depth_normalized = (depth_arr - depth_arr.min()) / (depth_arr.max() - depth_arr.min() + 1e-8)
    depth_img = Image.fromarray((depth_normalized * 255).astype(np.uint8))
    return ImageOps.autocontrast(depth_img)
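# Illustrative example (not in the original): a synthetic gradient yields a smooth
# black-to-white depth preview.
# _demo = processar_profundidade(np.linspace(0.0, 1.0, 256 * 256).reshape(256, 256))
# assert _demo.size == (256, 256)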
def processar_imagem(imagem: Image.Image):
    # Preprocessing: force RGB and a fixed 512x512 working resolution
    imagem = imagem.convert("RGB").resize((512, 512))

    # Generate the bas-relief render: img2img with the LoRA trigger word as the
    # prompt; strength=0.7 keeps the input composition while restyling the surface
    with torch.inference_mode():
        resultado = pipe(
            prompt="BAS-RELIEF",
            image=imagem,
            strength=0.7,
            num_inference_steps=20,
            guidance_scale=7.5
        )

    # Estimate depth on the generated image
    inputs = feature_extractor(resultado.images[0], return_tensors="pt").to(device)
    with torch.no_grad():
        depth = depth_model(**inputs).predicted_depth

    # Upsample the raw prediction back to the working resolution;
    # PIL's size is (width, height), interpolate expects (height, width)
    depth_map = torch.nn.functional.interpolate(
        depth.unsqueeze(1),
        size=imagem.size[::-1],
        mode="bicubic"
    ).squeeze().cpu().numpy()

    return resultado.images[0], processar_profundidade(depth_map)
# Gradio interface
interface = gr.Interface(
    fn=processar_imagem,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Image(label="Result"), gr.Image(label="Depth map")],
    title="Bas-relief Converter",
    description="Turn images into bas-reliefs with a matching depth map"
)
if __name__ == "__main__":
    interface.launch()
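# Deployment note (an assumption, not in the original): interface.queue() smooths
# concurrent requests, and interface.launch(share=True) exposes a public URL.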