import os
import requests
import gradio as gr
from PIL import Image
from io import BytesIO

# Generate an image from a text prompt via the Hugging Face Inference API
def generate_image(prompt):
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Ensure the token is set in your environment
    if not API_TOKEN:
        raise gr.Error("HF_READ_TOKEN is not set; add a Hugging Face read token to your environment.")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    payload = {
        "inputs": prompt
    }
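
    # Note: the Inference API can return 503 while the model is still loading.
    # A documented option is to add "options": {"wait_for_model": True} to the
    # payload so the request blocks until the model is ready (at the cost of a
    # longer first call); this sketch keeps the original payload unchanged.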

    # Call the Hugging Face API to generate the image
    response = requests.post(API_URL, headers=headers, json=payload)

    # Surface API failures in the Gradio UI; returning a plain string here
    # would break the image output component
    if response.status_code != 200:
        raise gr.Error(f"API request failed ({response.status_code}): {response.text}")

    # Convert the response content into a PIL image
    image = Image.open(BytesIO(response.content))

    return image  # Return the image to Gradio

# Define the chatbot function to return the generated image
def chatbot(prompt):
    image = generate_image(prompt)
    return image

# Create a simple Gradio interface: text prompt in, generated image out
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="image",
    title="prompthero/openjourney",
    description="Enter a text prompt and get an AI-generated image."
)

# Launch the interface
interface.launch()
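
# Usage note (a minimal sketch, not part of the original app): set HF_READ_TOKEN
# in your environment before starting the script. interface.launch() serves the
# app locally (http://127.0.0.1:7860 by default); pass share=True to launch()
# if you need a temporary public link.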