| """ | |
| Adapted from https://huggingface.co/spaces/stabilityai/stable-diffusion | |
| """ | |
| from tensorflow import keras | |
| import time | |
| import gradio as gr | |
| import keras_cv | |
| from constants import css, examples, img_height, img_width, num_images_to_gen | |
| from share_btn import community_icon_html, loading_icon_html, share_js | |
| from huggingface_hub import from_pretrained_keras | |
| from huggingface_hub import Repository | |
| import requests | |
# Load the fine-tuned text encoder and plug it into a Stable Diffusion
# pipeline; `generate_image_fn` below needs `model` to be defined.
MODEL_CKPT = "chansung/textual-inversion-pipeline@v1673026791"
MODEL = from_pretrained_keras(MODEL_CKPT)

model = keras_cv.models.StableDiffusion(
    img_width=img_width, img_height=img_height, jit_compile=True
)
model._text_encoder = MODEL
model._text_encoder.compile(jit_compile=True)

# Warm up the model so the first user request does not pay the tracing cost.
_ = model.text_to_image("Teddy bear", batch_size=num_images_to_gen)


def generate_image_fn(prompt: str, unconditional_guidance_scale: int) -> list:
    start_time = time.time()
    # `images` is an `np.ndarray`, so we convert it to a list of ndarrays,
    # each representing one generated image.
    # Reference: https://gradio.app/docs/#gallery
    images = model.text_to_image(
        prompt,
        batch_size=num_images_to_gen,
        unconditional_guidance_scale=unconditional_guidance_scale,
    )
    end_time = time.time()
    print(f"Time taken: {end_time - start_time} seconds.")
    return [image for image in images]

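# A quick smoke test of the function above (hypothetical call, assuming the
# model loaded successfully):
#
#   images = generate_image_fn("Teddy bear", unconditional_guidance_scale=40)
#   assert len(images) == num_images_to_gen

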
demoInterface = gr.Interface(
    generate_image_fn,
    inputs=[
        gr.Textbox(
            label="Enter your prompt",
            max_lines=1,
            # placeholder="cute Sundar Pichai creature",
        ),
        gr.Slider(label="Unconditional guidance scale", value=40, minimum=8, maximum=50, step=1),
    ],
    outputs=gr.Gallery().style(grid=[2], height="auto"),
    # examples=[["cute Sundar Pichai creature", 8], ["Hello kitty", 8]],
    allow_flagging="never",
)


def available_providers() -> list:
    """Lists cloud vendors currently available on 🤗 Endpoints."""
    providers = []
    headers = {
        "Content-Type": "application/json",
    }
    endpoint_url = "https://api.endpoints.huggingface.cloud/provider"
    response = requests.get(endpoint_url, headers=headers)

    for provider in response.json()['items']:
        if provider['status'] == 'available':
            providers.append(provider['vendor'])

    return providers

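# For reference, the loop above relies on a response shaped roughly like the
# sketch below (inferred from the fields it reads; any field beyond `items`,
# `status`, and `vendor` is an assumption):
#
#   {"items": [{"vendor": "aws", "status": "available", ...}]}

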
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Your own Stable Diffusion on Google Cloud Platform
        """)

    with gr.Row():
        gcp_project_id = gr.Textbox(
            label="GCP project ID",
        )
        gcp_region = gr.Dropdown(
            ["us-central1", "asia-east1", "asia-northeast1"],
            value="us-central1",
            interactive=True,
            label="GCP Region",
        )

    gr.Markdown(
        """
        Configurations on scalability
        """)
    with gr.Row():
        min_nodes = gr.Slider(
            label="minimum number of nodes",
            minimum=1,
            maximum=10)
        max_nodes = gr.Slider(
            label="maximum number of nodes",
            minimum=1,
            maximum=10)

    btn = gr.Button(value="Ready to Deploy!")
    # btn.click(mirror, inputs=[im], outputs=[im_2])


def update_regions(provider):
    """Refreshes the region dropdown with the regions available for `provider`."""
    available_regions = []
    headers = {
        "Content-Type": "application/json",
    }
    endpoint_url = f"https://api.endpoints.huggingface.cloud/provider/{provider}/region"
    response = requests.get(endpoint_url, headers=headers)

    for region in response.json()['items']:
        if region['status'] == 'available':
            available_regions.append(f"{region['region']}/{region['label']}")

    return gr.Dropdown.update(
        choices=available_regions,
        value=available_regions[0] if len(available_regions) > 0 else None
    )

def update_compute_options(provider, region):
    """Refreshes the compute dropdown with the instance types available in `region`."""
    region = region.split("/")[0]
    available_compute_options = []
    headers = {
        "Content-Type": "application/json",
    }
    endpoint_url = f"https://api.endpoints.huggingface.cloud/provider/{provider}/region/{region}/compute"
    print(endpoint_url)
    response = requests.get(endpoint_url, headers=headers)

    for compute in response.json()['items']:
        if compute['status'] == 'available':
            accelerator = compute['accelerator']
            numAccelerators = compute['numAccelerators']
            memoryGb = compute['memoryGb'].replace("Gi", "GB")
            architecture = compute['architecture']
            instanceType = compute['instanceType']

            # Renamed from `type` to avoid shadowing the built-in.
            type_desc = (
                f"{numAccelerators}vCPU {memoryGb} · {architecture}"
                if accelerator == "cpu"
                else f"{numAccelerators}x {architecture}"
            )
            available_compute_options.append(
                f"{accelerator.upper()} [{compute['instanceSize']}] · {type_desc} · {instanceType}"
            )

    return gr.Dropdown.update(
        choices=available_compute_options,
        value=available_compute_options[0] if len(available_compute_options) > 0 else None
    )

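
# The "Submit" button in the Blocks app below needs a `submit` handler, which
# is missing from this file. The following is a minimal sketch, not the
# original implementation: the creation route
# (`POST https://api.endpoints.huggingface.cloud/endpoint`) and the payload
# shape are assumptions inferred from the read-only routes used above, so
# verify both against the 🤗 Endpoints API docs before relying on it. Its
# signature mirrors the inputs wired to the button (the compute selector is
# not among them, so only scaling is filled in here).
def submit(hf_token, endpoint_name, provider, region, repository, revision,
           task, framework, min_node, max_node, security):
    payload = {
        "name": endpoint_name,
        "provider": {
            "vendor": provider,
            # The region dropdown holds "REGION/LABEL" strings; keep the id.
            "region": region.split("/")[0],
        },
        "model": {
            "repository": repository,
            # Display string from the UI; a real call wants a git revision.
            "revision": revision,
            "task": task.lower(),            # e.g. "custom"
            "framework": framework.lower(),  # e.g. "tensorflow"
        },
        # Assumed scaling schema; adjust to the actual API contract.
        "compute": {
            "scaling": {
                "minReplica": int(min_node),
                "maxReplica": int(max_node),
            },
        },
        "type": security.lower(),  # "protected" | "public" | "private"
    }
    headers = {
        "Authorization": f"Bearer {hf_token}",
        "Content-Type": "application/json",
    }
    response = requests.post(
        "https://api.endpoints.huggingface.cloud/endpoint",
        json=payload,
        headers=headers,
    )
    if response.ok:
        return f"Endpoint '{endpoint_name}' creation requested."
    return f"Request failed ({response.status_code}): {response.text}"

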
with gr.Blocks() as demo2:
    gr.Markdown(
        """
        ## Deploy Stable Diffusion on 🤗 Endpoint
        ---
        """)

    gr.Markdown("""
    #### Your 🤗 Access Token
    """)
    hf_token_input = gr.Textbox(
        show_label=False
    )

    gr.Markdown("""
    #### Decide the Endpoint name
    """)
    endpoint_name_input = gr.Textbox(
        show_label=False
    )

    providers = available_providers()
    head_sha = "398e79c789669981a2ab1da1fbdafc3998c7b08a"

    with gr.Row():
        gr.Markdown("""
        #### Cloud Provider
        """)
        gr.Markdown("""
        #### Cloud Region
        """)

    with gr.Row():
        provider_selector = gr.Dropdown(
            choices=providers,
            interactive=True,
            show_label=False,
        )
        region_selector = gr.Dropdown(
            [],
            value="",
            interactive=True,
            show_label=False,
        )
        provider_selector.change(update_regions, inputs=provider_selector, outputs=region_selector)

    with gr.Row():
        gr.Markdown("""
        #### Target Model
        """)
        gr.Markdown("""
        #### Target Model Version (branch)
        """)

    with gr.Row():
        repository_selector = gr.Textbox(
            value="chansung/my-kitty",
            interactive=False,
            show_label=False,
        )
        # Originally a second `repository_selector`, which silently clobbered
        # the first; the submit wiring below expects `revision_selector`.
        revision_selector = gr.Textbox(
            value=f"v1673365013/{head_sha[:7]}",
            interactive=False,
            show_label=False,
        )

    with gr.Row():
        gr.Markdown("""
        #### Task
        """)
        gr.Markdown("""
        #### Framework
        """)

    with gr.Row():
        task_selector = gr.Textbox(
            value="Custom",
            interactive=False,
            show_label=False,
        )
        framework_selector = gr.Textbox(
            value="TensorFlow",
            interactive=False,
            show_label=False,
        )

    gr.Markdown("""
    #### Select Compute Instance Type
    """)
    compute_selector = gr.Dropdown(
        [],
        value="",
        interactive=True,
        show_label=False,
    )
    region_selector.change(update_compute_options, inputs=[provider_selector, region_selector], outputs=compute_selector)

    with gr.Row():
        gr.Markdown("""
        #### Min Number of Nodes
        """)
        gr.Markdown("""
        #### Max Number of Nodes
        """)
        gr.Markdown("""
        #### Security Level
        """)

    with gr.Row():
        min_node_selector = gr.Number(
            value=1,
            interactive=True,
            show_label=False,
        )
        max_node_selector = gr.Number(
            value=1,
            interactive=True,
            show_label=False,
        )
        security_selector = gr.Radio(
            choices=["Protected", "Public", "Private"],
            value="Public",
            interactive=True,
            show_label=False,
        )

    submit_button = gr.Button(
        value="Submit",
    )
    status_txt = gr.Textbox(
        value="any status update will be displayed here",
        interactive=False
    )
    # Wire the button to the sketched `submit` handler defined above; the
    # returned status string lands in `status_txt`.
    submit_button.click(
        submit,
        inputs=[
            hf_token_input,
            endpoint_name_input,
            provider_selector,
            region_selector,
            repository_selector,
            revision_selector,
            task_selector,
            framework_selector,
            min_node_selector,
            max_node_selector,
            security_selector],
        outputs=status_txt)

gr.TabbedInterface(
    [demoInterface, demo, demo2], ["Try-out", "🚀 Deploy on GCP", "Deploy on 🤗 Endpoint"]
).launch(enable_queue=True)