import os
import requests
import gradio as gr

# Define the function to query the Hugging Face API for image generation
def generate_image(prompt):
    API_URL = "https://api-inference.huggingface.co/models/KingNish/flux-me"
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Ensure the token is set in your environment
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    payload = {
        "inputs": prompt
    }
    # Call the Hugging Face API to generate the image
    response = requests.post(API_URL, headers=headers, json=payload)

    # Surface API failures in the Gradio UI instead of returning a string to an image output
    if response.status_code != 200:
        raise gr.Error(f"Error: {response.status_code}, {response.text}")

    # Save the generated image bytes and return the file path
    image_path = "generated_image.png"
    with open(image_path, "wb") as f:
        f.write(response.content)
    return image_path
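
# Note: the hosted Inference API can return 503 while the model is cold-loading.
# A simple retry loop (a sketch, not part of the original app; assumes `import time`) could be:
#     for _ in range(3):
#         response = requests.post(API_URL, headers=headers, json=payload)
#         if response.status_code != 503:
#             break
#         time.sleep(10)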

# Define the chatbot function to return the generated image
def chatbot(prompt):
    image = generate_image(prompt)
    return image
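
# Gradio's "image" output component accepts the file path returned above
# (as well as a PIL image or NumPy array), so the wrapper can pass the path through unchanged.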

# Create the Gradio interface with the same UI/UX
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="image",
    title="prompthero/openjourney",
    description="Enter a text prompt and get an AI-generated image."
)

# Launch the interface
interface.launch()
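
# Usage (a sketch; assumes this file is saved as app.py, the usual Hugging Face Spaces entry point):
#     export HF_READ_TOKEN=hf_xxx   # a read-scoped Hugging Face access token
#     python app.py                 # then open the local URL that Gradio prints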