upload v1673430532 model
Browse files
app.py
CHANGED
@@ -5,6 +5,7 @@ Adapted from https://huggingface.co/spaces/stabilityai/stable-diffusion
|
|
5 |
from tensorflow import keras
|
6 |
|
7 |
import time
|
|
|
8 |
import requests
|
9 |
|
10 |
import gradio as gr
|
@@ -17,9 +18,11 @@ from huggingface_hub import from_pretrained_keras
|
|
17 |
|
18 |
PLACEHOLDER_TOKEN="<my-cat-token>"
|
19 |
|
20 |
-
MODEL_CKPT = "chansung/my-kitty@
|
21 |
MODEL = from_pretrained_keras(MODEL_CKPT)
|
22 |
|
|
|
|
|
23 |
model = keras_cv.models.StableDiffusion(
|
24 |
img_width=img_width, img_height=img_height, jit_compile=True
|
25 |
)
|
@@ -62,7 +65,9 @@ demoInterface = gr.Interface(
|
|
62 |
title="Generate custom images with finetuned embeddings of Stable Diffusion",
|
63 |
description=description,
|
64 |
article=article,
|
65 |
-
examples=[
|
|
|
|
|
66 |
allow_flagging=False,
|
67 |
)
|
68 |
|
@@ -129,9 +134,83 @@ def update_compute_options(provider, region):
|
|
129 |
value=avalialbe_compute_options[0] if len(avalialbe_compute_options) > 0 else None
|
130 |
)
|
131 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
132 |
with gr.Blocks() as hf_endpoint:
|
133 |
providers = avaliable_providers()
|
134 |
-
head_sha = "de89a67644d74f4235257b3b8e448244fe463ec0"
|
135 |
|
136 |
gr.Markdown(
|
137 |
"""
|
@@ -144,7 +223,8 @@ with gr.Blocks() as hf_endpoint:
|
|
144 |
#### Your 🤗 Access Token
|
145 |
""")
|
146 |
hf_token_input = gr.Textbox(
|
147 |
-
show_label=False
|
|
|
148 |
)
|
149 |
|
150 |
gr.Markdown("""
|
@@ -195,8 +275,8 @@ with gr.Blocks() as hf_endpoint:
|
|
195 |
show_label=False,
|
196 |
)
|
197 |
|
198 |
-
|
199 |
-
value=f"
|
200 |
interactive=False,
|
201 |
show_label=False,
|
202 |
)
|
@@ -277,6 +357,22 @@ with gr.Blocks() as hf_endpoint:
|
|
277 |
interactive=False
|
278 |
)
|
279 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
280 |
gr.TabbedInterface(
|
281 |
-
[demoInterface, hf_endpoint], ["
|
282 |
).launch(enable_queue=True)
|
|
|
5 |
from tensorflow import keras
|
6 |
|
7 |
import time
|
8 |
+
import json
|
9 |
import requests
|
10 |
|
11 |
import gradio as gr
|
|
|
18 |
|
19 |
# Placeholder concept token; it is substituted into the example prompts below
# so the finetuned embedding is triggered at generation time.
PLACEHOLDER_TOKEN = "<my-cat-token>"

# Finetuned Keras checkpoint on the Hub, pinned to a specific revision tag.
MODEL_CKPT = "chansung/my-kitty@v1673430532"
MODEL = from_pretrained_keras(MODEL_CKPT)

# Commit SHA of the Hub repo; used as the `revision` when deploying
# an Inference Endpoint for this model.
head_sha = "eaa7a2f3ceaa2e58e7ca10ae2b9337ea6475fa4f"

# Base Stable Diffusion pipeline. NOTE(review): `img_width`/`img_height`
# are defined elsewhere in this file — confirm they are set before this line.
model = keras_cv.models.StableDiffusion(
    img_width=img_width, img_height=img_height, jit_compile=True
)
|
|
|
65 |
title="Generate custom images with finetuned embeddings of Stable Diffusion",
|
66 |
description=description,
|
67 |
article=article,
|
68 |
+
examples=[
|
69 |
+
[f"an oil painting of {PLACEHOLDER_TOKEN}", 8],
|
70 |
+
[f"A mysterious {PLACEHOLDER_TOKEN} approaches the great pyramids of egypt.", 8]],
|
71 |
allow_flagging=False,
|
72 |
)
|
73 |
|
|
|
134 |
value=avalialbe_compute_options[0] if len(avalialbe_compute_options) > 0 else None
|
135 |
)
|
136 |
|
137 |
+
def submit(
    hf_token_input,
    endpoint_name_input,
    provider_selector,
    region_selector,
    repository_selector,
    task_selector,
    framework_selector,
    compute_selector,
    min_node_selector,
    max_node_selector,
    security_selector
):
    """Create a Hugging Face Inference Endpoint from the UI selections.

    Builds the Endpoint-creation payload from the Gradio widget values,
    POSTs it to the Endpoints API, and returns a human-readable status
    string for the status textbox.

    Returns:
        str: success or error message depending on the HTTP response.
    """
    # The compute option string looks like "GPU [medium] · <instance-type>"
    # (assumed from the parsing below — confirm against the options list).
    compute_resources = compute_selector.split("·")
    accelerator = compute_resources[0][:3].strip()

    # Extract the size between the brackets, e.g. "GPU [medium]" -> "medium".
    # (Original code sliced from index("[") - 1 and then stripped the
    # leading "[" with size[1:]; slicing between the brackets directly
    # yields the same value without the compensation step.)
    size_l_index = compute_resources[0].index("[") + 1
    size_r_index = compute_resources[0].index("]")
    size = compute_resources[0][size_l_index:size_r_index].strip()

    # Renamed from `type`, which shadowed the builtin.
    instance_type = compute_resources[-1].strip()

    payload = {
        "accountId": repository_selector.split("/")[0],
        "compute": {
            "accelerator": accelerator.lower(),
            "instanceSize": size,
            "instanceType": instance_type,
            "scaling": {
                "maxReplica": int(max_node_selector),
                "minReplica": int(min_node_selector),
            },
        },
        "model": {
            "framework": framework_selector.lower(),
            "image": {
                "huggingface": {}
            },
            "repository": repository_selector.lower(),
            "revision": head_sha,
            "task": task_selector.lower(),
        },
        "name": endpoint_name_input.strip(),
        "provider": {
            "region": region_selector.split("/")[0].lower(),
            "vendor": provider_selector.lower(),
        },
        "type": security_selector.lower(),
    }

    headers = {
        "Authorization": f"Bearer {hf_token_input.strip()}",
        "Content-Type": "application/json",
    }
    # Plain string — the original used an f-string with no placeholders.
    endpoint_url = "https://api.endpoints.huggingface.cloud/endpoint"

    # Debug prints of the payload/URL removed; the payload is still echoed
    # back in the 400 error message below.
    response = requests.post(endpoint_url, headers=headers, data=json.dumps(payload))

    if response.status_code == 400:
        return f"{response.text}. Malformed data in {payload}"
    elif response.status_code == 401:
        return "Invalid token"
    elif response.status_code == 409:
        return f"Endpoint {endpoint_name_input} already exists"
    elif response.status_code == 202:
        return f"Endpoint {endpoint_name_input} created successfully on {provider_selector.lower()} using {repository_selector.lower()}@{head_sha}.\nPlease check out the progress at https://ui.endpoints.huggingface.co/endpoints."
    else:
        return f"something went wrong {response.status_code} = {response.text}"
|
211 |
+
|
212 |
with gr.Blocks() as hf_endpoint:
|
213 |
providers = avaliable_providers()
|
|
|
214 |
|
215 |
gr.Markdown(
|
216 |
"""
|
|
|
223 |
#### Your 🤗 Access Token
|
224 |
""")
|
225 |
hf_token_input = gr.Textbox(
|
226 |
+
show_label=False,
|
227 |
+
type="password"
|
228 |
)
|
229 |
|
230 |
gr.Markdown("""
|
|
|
275 |
show_label=False,
|
276 |
)
|
277 |
|
278 |
+
revision_selector = gr.Textbox(
|
279 |
+
value=f"v1673430532/{head_sha[:7]}",
|
280 |
interactive=False,
|
281 |
show_label=False,
|
282 |
)
|
|
|
357 |
interactive=False
|
358 |
)
|
359 |
|
360 |
+
submit_button.click(
|
361 |
+
submit,
|
362 |
+
inputs=[
|
363 |
+
hf_token_input,
|
364 |
+
endpoint_name_input,
|
365 |
+
provider_selector,
|
366 |
+
region_selector,
|
367 |
+
repository_selector,
|
368 |
+
task_selector,
|
369 |
+
framework_selector,
|
370 |
+
compute_selector,
|
371 |
+
min_node_selector,
|
372 |
+
max_node_selector,
|
373 |
+
security_selector],
|
374 |
+
outputs=status_txt)
|
375 |
+
|
376 |
# Serve both tabs: the generation playground and the Endpoint deployment UI.
gr.TabbedInterface(
    [demoInterface, hf_endpoint],
    ["Playground", " Deploy on 🤗 Endpoint"],
).launch(enable_queue=True)
|