azhan77168 committed
Commit 3e11adb · verified · 1 Parent(s): b94afcc

Create app.py

Files changed (1):
app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+ import spaces
+ import os
+ import torch
+ import gradio as gr
+ from PIL import Image
+ from peft import PeftModel  # not used by the active code path
+ from diffusers import AutoPipelineForImage2Image
+ # Only referenced by the commented-out alternatives below:
+ from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline, DiffusionPipeline
+ # Load the model
+ # model_id = "nitrosocke/Ghibli-Diffusion"
+ # pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
+ tk = os.getenv('ghtoken')  # Hugging Face token for the gated FLUX.1-dev model
+ if tk is None:
+     print("Warning: 'ghtoken' is not set; downloading FLUX.1-dev will fail.")
+ model_id = "black-forest-labs/FLUX.1-dev"
+ # pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16, token=tk)
+ pipe = AutoPipelineForImage2Image.from_pretrained(model_id, torch_dtype=torch.bfloat16, token=tk)
+ 
+ # # 1. Pick a base model, e.g. SD 1.5
+ # base_model_id = "runwayml/stable-diffusion-v1-5"
+ 
+ # # 2. Load the base model
+ # pipe = StableDiffusionPipeline.from_pretrained(
+ #     base_model_id,
+ #     torch_dtype=torch.float32
+ # )
+ 
+ # # 3. Load the LoRA weights
+ # lora_model_id = "openfree/flux-chatgpt-ghibli-lora"
+ # pipe.load_lora_weights(lora_model_id)
+ 
+ # pipe = AutoPipelineForImage2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.bfloat16, token=True)
+ # pipe.load_lora_weights('openfree/flux-chatgpt-ghibli-lora', weight_name='flux-chatgpt-ghibli-lora.safetensors')
+ pipe.load_lora_weights("alvarobartt/ghibli-characters-flux-lora")
+ 
+ # Move the pipeline to GPU if available
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipe = pipe.to(device)
+ 
+ # Define the inference function
+ @spaces.GPU
+ def ghibli_transform(input_image, prompt="GHBLI anime style photo", guidance_scale=3.5, num_steps=30):
+     print('guidance_scale:', guidance_scale)
+     print('num_steps:', num_steps)
+     if input_image is None:
+         raise gr.Error("No image uploaded! Please upload an image before clicking Transform.")
+ 
+     # Process the input image (keep it as a PIL Image)
+     try:
+         init_image = input_image.convert("RGB").resize((1024, 768))
+     except Exception as e:
+         raise gr.Error(f"Failed to process image: {str(e)}")
+ 
+     # Generate the Ghibli-style image
+     try:
+         output = pipe(
+             prompt=prompt,
+             image=init_image,
+             # strength=strength,  # not exposed in the UI; the pipeline default is used
+             guidance_scale=guidance_scale,
+             num_inference_steps=num_steps  # use the UI-provided value
+         ).images[0]
+     except Exception as e:
+         raise gr.Error(f"Pipeline error: {str(e)}")
+ 
+     return output
+ 
+ # Create the Gradio interface
+ with gr.Blocks(title="Transformer") as demo:
+     gr.Markdown("# Transformer")
+     gr.Markdown("Upload an image and transform it! [Website](http://imagetoghibli.online/)")
+ 
+     with gr.Row():
+         with gr.Column():
+             input_img = gr.Image(label="Upload Image", type="pil")
+             prompt = gr.Textbox(label="Prompt", value="GHBLI anime style photo")
+ 
+             guidance = gr.Slider(1, 20, value=3.5, step=0.5, label="Guidance Scale")
+             num_steps = gr.Slider(10, 100, value=30, step=5, label="Inference Steps (Higher = Better Quality, Slower)")
+             submit_btn = gr.Button("Transform")
+         with gr.Column():
+             output_img = gr.Image(label="Ghibli-Style Output")
+ 
+     # Connect the button to the function
+     submit_btn.click(
+         fn=ghibli_transform,
+         inputs=[input_img, prompt, guidance, num_steps],
+         outputs=output_img
+     )
+ 
+ # Launch the Space with share=True for a public link
+ demo.launch(share=True)
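
Note: strength is left commented out in the pipe(...) call above, so ghibli_transform relies on the pipeline's default image-to-image strength. A minimal sketch of passing it explicitly is shown below; the 0.7 value is an illustrative assumption to tune per image, and in diffusers image-to-image pipelines roughly strength * num_inference_steps denoising steps are actually applied to the uploaded photo (lower strength keeps more of the input, higher strength follows the prompt and LoRA style more strongly).

output = pipe(
    prompt=prompt,
    image=init_image,
    strength=0.7,  # assumed value; not part of the committed app.py
    guidance_scale=guidance_scale,
    num_inference_steps=num_steps
).images[0]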