Shawn Wilkinson committed · Commit 7ef9aca · 1 Parent(s): 655cdae
Initial Files

- app.py +34 -31
- requirements.txt +2 -1
app.py
CHANGED
@@ -1,27 +1,19 @@
+import os
+from dotenv import load_dotenv
+import requests
+import json
 import gradio as gr
 import numpy as np
 import random
+import io
+from PIL import Image
 
-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
-import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
+# Load environment variables
+load_dotenv()
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-
-# @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
     prompt,
     negative_prompt,
@@ -36,20 +28,31 @@ def infer(
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
+    url = "https://inference.prodia.com/v2/job"
+    headers = {
+        'accept': 'image/jpeg',
+        'content-type': 'application/json',
+        'authorization': f'Bearer {os.getenv("PRODIA_KEY")}'
+    }
+    data = {
+        "type": "inference.flux.dev.txt2img.v1",
+        "config": {
+            "prompt": prompt,
+            "guidance_scale": guidance_scale,
+            "steps": num_inference_steps,
+            "width": width,
+            "height": height
+        }
+    }
+
+    response = requests.post(url, headers=headers, data=json.dumps(data))
+
+    if response.status_code == 200:
+        image_bytes = io.BytesIO(response.content)
+        image = Image.open(image_bytes)
+        return image, seed
+    else:
+        return f"Error: {response.status_code}, {response.text}", seed
 
 examples = [
     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
@@ -130,7 +133,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                    maximum=50,
                    step=1,
-                    value=
+                    value=25, # Replace with defaults that work for your model
                )
 
        gr.Examples(examples=examples, inputs=[prompt])
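For context: the change above swaps the template's local DiffusionPipeline inference for a call to Prodia's hosted API. A minimal standalone sketch of that request flow follows, with the endpoint, headers, and job type taken from the diff; the prompt, config values, and output filename are illustrative, and PRODIA_KEY is assumed to be set in a local .env file.

import io
import json
import os

import requests
from dotenv import load_dotenv
from PIL import Image

load_dotenv()  # reads PRODIA_KEY from a local .env file

# Same endpoint and job type as in app.py above
response = requests.post(
    "https://inference.prodia.com/v2/job",
    headers={
        "accept": "image/jpeg",
        "content-type": "application/json",
        "authorization": f"Bearer {os.getenv('PRODIA_KEY')}",
    },
    data=json.dumps({
        "type": "inference.flux.dev.txt2img.v1",
        "config": {
            "prompt": "Astronaut in a jungle, cold color palette, detailed, 8k",
            "guidance_scale": 7.0,  # illustrative value
            "steps": 25,
            "width": 1024,
            "height": 1024,
        },
    }),
)
response.raise_for_status()  # a non-200 status means the job failed
Image.open(io.BytesIO(response.content)).save("out.jpg")  # body is the JPEG itself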
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ diffusers
 invisible_watermark
 torch
 transformers
-xformers
+xformers
+python-dotenv
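python-dotenv is the runtime counterpart of the new load_dotenv() call in app.py: it loads PRODIA_KEY from a local .env file. A minimal sketch of that file is below; the variable name comes from the diff, the value is a placeholder, and on Hugging Face Spaces the key would normally be stored as a Space secret rather than committed.

# .env (keep out of version control)
PRODIA_KEY=your-prodia-api-key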