File size: 1,179 Bytes
424839d
50bfe7a
46a0585
50bfe7a
fe63753
50bfe7a
 
 
789ecb4
424839d
 
 
50bfe7a
fe63753
 
789ecb4
 
 
fe63753
 
789ecb4
 
 
 
 
 
 
 
 
 
 
50bfe7a
 
789ecb4
 
 
 
 
50bfe7a
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import os
import torch
from diffusers import FluxPipeline # type: ignore
import gradio as gr # type: ignore
from huggingface_hub import login, InferenceClient

# Authenticate with the Hugging Face Hub first: FLUX.1-dev is a gated repo,
# so login must happen BEFORE from_pretrained() can download the weights.
token = os.getenv("HF_TOKEN")
login(token=token)

# Load the FLUX.1-dev text-to-image pipeline in bfloat16 to reduce memory use.
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power

# Remote inference client for the same model via the "together" provider.
# Fix: `api_key` is an alias of `token` in InferenceClient — passing both
# raises a ValueError, so only one is supplied.
# NOTE(review): this client is never used below (generation goes through the
# local `pipe`); kept to preserve the module's interface — confirm intent.
client = InferenceClient(
    provider="together",
    api_key=token,
    model="black-forest-labs/FLUX.1-dev",
)

def generate_image(prompt, seed=0):
    """Generate a 1024x1024 image from a text prompt with the local FLUX pipeline.

    Args:
        prompt: Text description of the image to generate.
        seed: Integer seed for the CPU random generator. Defaults to 0,
            preserving the original fully deterministic behavior.

    Returns:
        A PIL.Image — the first (and only) sample produced by the pipeline.
    """
    result = pipe(
        prompt,
        height=1024,
        width=1024,
        guidance_scale=3.5,            # classifier-free guidance strength
        num_inference_steps=50,
        max_sequence_length=512,       # max token length for the text encoder
        generator=torch.Generator("cpu").manual_seed(seed),
    )
    return result.images[0]

# Fix: the gr.inputs / gr.outputs namespaces were removed in Gradio 3.x;
# components now live at the top level (gr.Textbox, gr.Image).
gradio_app = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Entrez une description"),
    outputs=gr.Image(label="Image générée"),
    title="Générateur d'images IA",
    description="Entrez une description et générez une image correspondante.",
)

if __name__ == "__main__":
    # Launch the local Gradio web UI only when run as a script.
    gradio_app.launch()