File size: 1,371 Bytes
2cc7460
7f5f8e9
 
670816f
 
9d1b8e4
670816f
7f5f8e9
670816f
 
2cc7460
f6f44a7
2cc7460
670816f
2cc7460
63cbc5c
7f5f8e9
2cc7460
9614ae1
670816f
2cc7460
 
f6f44a7
670816f
 
 
 
 
e004d17
670816f
2cc7460
c95ed83
 
 
 
 
670816f
2cc7460
c95ed83
2cc7460
 
c95ed83
 
2cc7460
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
import requests
import gradio as gr
from PIL import Image
from io import BytesIO

# Function to generate image from Hugging Face API
def generate_image(prompt):
    """Generate an image from a text prompt via the Hugging Face Inference API.

    Args:
        prompt: Text description of the desired image.

    Returns:
        A PIL.Image.Image on success, or a human-readable error string on
        failure (HTTP error, timeout, or non-image response body).
    """
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # Make sure the token is in your environment
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    payload = {
        "inputs": prompt,
    }

    # Call the Hugging Face API to generate the image.
    # A timeout is essential: without one, requests can block forever if the
    # inference endpoint stalls (e.g. while the model cold-starts).
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    except requests.exceptions.RequestException as e:
        return f"Error contacting API: {str(e)}"

    # Check if the response was successful
    if response.status_code != 200:
        return f"Error: {response.status_code}, {response.text}"

    # Ensure the response contains an image by loading it into PIL.
    # NOTE: the API can answer 200 with a JSON body (e.g. "model loading");
    # PIL will raise in that case and we surface it as an error string.
    try:
        image = Image.open(BytesIO(response.content))
    except Exception as e:
        return f"Error processing image: {str(e)}"

    return image  # Return the PIL image object

# Define the chatbot function to return the generated image
def chatbot(prompt):
    """Gradio entry point: forward the user's prompt to the image generator."""
    return generate_image(prompt)

# Create the Gradio interface
# Build and start the Gradio UI: a single text box in, one image out.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="image",
    title="prompthero/openjourney",
    description="Enter a text prompt and get an AI-generated image.",
)

interface.launch()