Update app.py
Browse files
app.py
CHANGED
@@ -1,61 +1,43 @@
|
|
1 |
import gradio as gr
|
2 |
-
import requests
|
3 |
import os
|
|
|
4 |
from PIL import Image
|
5 |
-
from io import BytesIO
|
6 |
-
from tqdm import tqdm
|
7 |
-
import time
|
8 |
|
9 |
-
# Load token from
|
10 |
-
token = os.environ["HF_TOKEN"]
|
11 |
|
12 |
-
#
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
trigger_word = "T shirt design, TshirtDesignAF, "
|
15 |
|
16 |
def generate_image(prompt):
|
17 |
-
print("Generating image with prompt:", prompt)
|
18 |
-
api_url = f"https://api-inference.huggingface.co/models/{repo}"
|
19 |
-
|
20 |
-
headers = {
|
21 |
-
"Authorization": f"Bearer {token}"
|
22 |
-
}
|
23 |
-
|
24 |
full_prompt = f"{prompt} {trigger_word}"
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
29 |
"num_inference_steps": 30,
|
30 |
-
"scheduler": "DPMSolverMultistepScheduler"
|
31 |
-
}
|
32 |
-
|
33 |
-
|
34 |
-
error_count = 0
|
35 |
-
pbar = tqdm(total=None, desc="Loading model")
|
36 |
-
while True:
|
37 |
-
print("Sending request to API...")
|
38 |
-
response = requests.post(api_url, headers=headers, json=payload)
|
39 |
-
print("API response status code:", response.status_code)
|
40 |
-
if response.status_code == 200:
|
41 |
-
print("Image generation successful!")
|
42 |
-
return Image.open(BytesIO(response.content))
|
43 |
-
elif response.status_code == 503:
|
44 |
-
time.sleep(1)
|
45 |
-
pbar.update(1)
|
46 |
-
elif response.status_code == 500 and error_count < 5:
|
47 |
-
time.sleep(1)
|
48 |
-
error_count += 1
|
49 |
-
else:
|
50 |
-
print("API Error:", response.status_code)
|
51 |
-
raise Exception(f"API Error: {response.status_code}")
|
52 |
|
|
|
53 |
iface = gr.Interface(
|
54 |
fn=generate_image,
|
55 |
inputs=gr.Textbox(lines=2, placeholder="Type your prompt here..."),
|
56 |
outputs="image",
|
57 |
title="TShirt Design XL Image Generator",
|
58 |
-
description="Powered by Redmond.AI —
|
59 |
examples=[["Cute Panda"], ["Skull"]]
|
60 |
)
|
61 |
|
|
|
1 |
import gradio as gr
|
|
|
2 |
import os
|
3 |
+
from huggingface_hub import InferenceClient
|
4 |
from PIL import Image
|
|
|
|
|
|
|
5 |
|
6 |
+
# Load token from environment
|
7 |
+
token = os.environ["HF_TOKEN"]
|
8 |
|
9 |
+
# Setup inference client
|
10 |
+
client = InferenceClient(
|
11 |
+
model="artificialguybr/TshirtDesignRedmond-V2",
|
12 |
+
provider="fal-ai",
|
13 |
+
token=token,
|
14 |
+
)
|
15 |
+
|
16 |
+
# Trigger words as per model instructions
|
17 |
trigger_word = "T shirt design, TshirtDesignAF, "
|
18 |
|
19 |
def generate_image(prompt):
    """Generate a T-shirt design image for *prompt*.

    The model's trigger words are appended to the user text, then the
    request is sent through the module-level ``InferenceClient``.

    Args:
        prompt: Free-form text describing the desired design.

    Returns:
        The generated image (``PIL.Image.Image``), which Gradio renders
        directly for an ``"image"`` output.
    """
    full_prompt = f"{prompt} {trigger_word}"
    print("Generating image with prompt:", full_prompt)

    # FIX: InferenceClient.text_to_image() takes generation options as
    # individual keyword arguments — it has no `params=` parameter, so the
    # previous `params={...}` call raised TypeError on every request.
    image = client.text_to_image(
        full_prompt,
        negative_prompt="(worst quality, low quality, normal quality, lowres, low details...)",
        num_inference_steps=30,
        # NOTE(review): `scheduler` support varies by huggingface_hub version
        # and provider (fal-ai) — drop this kwarg if the client rejects it.
        scheduler="DPMSolverMultistepScheduler",
    )
    return image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
|
34 |
+
# Gradio interface: one textbox in, one generated image out.
# NOTE(review): no `iface.launch()` is visible in this chunk — confirm the
# app is actually started elsewhere in the file.
iface = gr.Interface(
    fn=generate_image,  # invoked once per submission with the textbox contents
    inputs=gr.Textbox(lines=2, placeholder="Type your prompt here..."),
    outputs="image",  # the PIL image returned by generate_image is rendered as-is
    title="TShirt Design XL Image Generator",
    description="Powered by Redmond.AI — Generates T-shirt designs from prompts. Trigger tags: 'T shirt design, TshirtDesignAF'.",
    examples=[["Cute Panda"], ["Skull"]]  # one-click sample prompts shown under the form
)
|
43 |
|