from gradio_client import Client, file
import gradio as gr
from PIL import Image
import tempfile
# Configuration for Hugging Face Spaces
CAPTION_SPACE = "gokaygokay/SD3-Long-Captioner"
LLM_SPACE = "hysts/zephyr-7b"
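
# System prompt for the compliment-generating LLM. The original code uses
# SYSTEM_PROMPT without defining it, so this wording is only a placeholder
# and can be adjusted to taste.
SYSTEM_PROMPT = (
    "You are a kind assistant. Given a description of a person's photo, "
    "reply with one short, warm, personalized compliment."
)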
# Initialize Gradio clients for the captioning and language model Spaces
captioning_client = Client(CAPTION_SPACE)
llm_client = Client(LLM_SPACE)
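
# Optional sanity check: the predict() calls below assume particular endpoint
# names and parameters on the remote Spaces. Uncomment these lines to print
# each Space's actual API signature before relying on them.
# captioning_client.view_api()
# llm_client.view_api()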
def generate_compliment(image):
    caption_text = ""
    compliment_text = ""

    # Save the PIL image to a temporary JPEG file; gradio_client's file()
    # helper expects a filepath or URL rather than raw bytes. Convert to RGB
    # first, since JPEG cannot store an alpha channel.
    with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
        image.convert("RGB").save(tmp, format="JPEG")
        image_path = tmp.name

    # Retrieve a caption from the captioning Space.
    # predict() returns the endpoint's output value directly.
    try:
        caption_text = captioning_client.predict(
            file(image_path),
            api_name="/create_captions_rich"
        )
    except Exception as e:
        return "Error", f"Failed to get caption. Exception: {str(e)}"

    # Generate a compliment using the language model. The keyword arguments
    # below assume the Space exposes a /chat endpoint with "message" and
    # "system_prompt" parameters; confirm with view_api() if unsure.
    try:
        compliment_text = llm_client.predict(
            message=f"Caption: {caption_text}\nCompliment: ",
            system_prompt=SYSTEM_PROMPT,
            api_name="/chat"
        )
    except Exception as e:
        return "Error", f"Failed to generate compliment. Exception: {str(e)}"

    return caption_text, compliment_text
# Gradio interface
iface = gr.Interface(
    fn=generate_compliment,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[
        gr.Textbox(label="Caption"),
        gr.Textbox(label="Compliment")
    ],
    title="Compliment Bot π",
    description="Upload your headshot and get a personalized compliment!",
    live=True  # Re-run automatically whenever the uploaded image changes
)
iface.launch()