import os

import gradio as gr
import requests


# Query the Hugging Face Inference API and return the generated image
def generate_image_from_model(prompt):
    API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    payload = {"inputs": prompt}

    response = requests.post(API_URL, headers=headers, json=payload)

    # Surface API errors in the Gradio UI instead of returning a non-image value
    if response.status_code != 200:
        raise gr.Error(f"Error: {response.status_code}, {response.text}")

    # The response body is the raw image bytes; save it and return the file path
    image_path = "generated_image.png"
    with open(image_path, "wb") as img_file:
        img_file.write(response.content)
    return image_path
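# Example of calling the function directly (a sketch, assuming HF_READ_TOKEN is set;
# the prompt is illustrative):
#   image_path = generate_image_from_model("a watercolor painting of a fox in a forest")
#   print(image_path)  # -> "generated_image.png"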
# Create the Gradio interface: a text prompt in, an image out
interface = gr.Interface(
    fn=generate_image_from_model,
    inputs="text",
    outputs="image",
    title="OpenJourney Text-to-Image",
    description="Enter a prompt to generate an image using the OpenJourney model.",
)

# Launch the interface
interface.launch()
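# To run locally (a sketch, assuming this file is saved as app.py and you have a
# Hugging Face read token):
#   pip install gradio requests
#   export HF_READ_TOKEN=<your token>
#   python app.py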