Update app.py
app.py CHANGED
@@ -1,24 +1,33 @@
-from diffusers import StableDiffusionPipeline
 import torch
+from diffusers import FluxPipeline
 import gradio as gr
 
-# Load the model
-
-pipe
-pipe.to("cpu")  # Use "cuda" if you have a GPU
+# Load the model and set to CPU
+pipe = FluxPipeline.from_pretrained("Shakker-Labs/AWPortrait-FL", torch_dtype=torch.float32)
+pipe.to("cpu")  # Set to CPU
 
-#
+# Define the function to generate an image from a given prompt
 def generate_image(prompt):
-    image = pipe(prompt
+    image = pipe(prompt,
+                 num_inference_steps=24,
+                 guidance_scale=3.5,
+                 width=768, height=1024).images[0]
+    image.save(f"example.png")
     return image
-
-#
-def
+
+# Create a Gradio interface
+def inference(prompt):
     image = generate_image(prompt)
     return image
 
-#
-interface = gr.Interface(
+# Gradio Interface
+interface = gr.Interface(
+    fn=inference,
+    inputs=gr.Textbox(lines=2, placeholder="Enter your image description here..."),
+    outputs="image",
+    title="Text-to-Image Generator",
+    description="Enter a text prompt to generate an image using AWPortrait-FL."
+)
 
-# Launch the interface
+# Launch the Gradio interface
 interface.launch()
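For local testing outside the Space, a minimal sketch of the same pipeline call that picks up a GPU when one is available; the device/dtype selection and the bfloat16 choice are assumptions on top of this commit, which pins the app to CPU and float32.

import torch
from diffusers import FluxPipeline

# Assumption: use CUDA with bfloat16 when available, otherwise fall back to
# the committed CPU/float32 setup.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16 if device == "cuda" else torch.float32

pipe = FluxPipeline.from_pretrained("Shakker-Labs/AWPortrait-FL", torch_dtype=dtype)
pipe.to(device)

image = pipe(
    "close-up portrait, natural light",  # example prompt
    num_inference_steps=24,
    guidance_scale=3.5,
    width=768,
    height=1024,
).images[0]
image.save("example.png")

On a machine without CUDA this reduces to the same CPU/float32 path as the committed app.py.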