# api_example.py — imagencpu client example (commit 0443b19, "merge controlnet")
import requests
import io
from PIL import Image
import os
from dotenv import load_dotenv
# Load environment variables from a local .env file into os.environ
load_dotenv()
# Hugging Face API token (set in .env or the environment).
# NOTE(review): HF_TOKEN is read here but never referenced again in this file —
# presumably intended for authenticated requests to the server; confirm whether
# it should be attached to the outgoing requests below.
HF_TOKEN = os.getenv("HF_TOKEN")
# Base URL of the locally running generation API server
API_BASE = "http://localhost:8000"
def text_to_image(prompt=None, model=None, negative_prompt=None,
                  guidance_scale=None, num_inference_steps=None, timeout=300):
    """
    Generate an image from text via the API's /text-to-image endpoint.

    All generation parameters are optional; any left as None is omitted
    from the request so the server applies its own defaults.

    Args:
        prompt: Text description of the desired image.
        model: Model identifier understood by the server.
        negative_prompt: Features to steer the generation away from.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        timeout: Seconds to wait for the HTTP response. Generation can be
            slow (CPU inference), so the default is generous; previously no
            timeout was set and the client could hang forever.

    Returns:
        A PIL.Image.Image on success, or None on any non-200 response
        (the status code and body are printed for debugging).
    """
    url = f"{API_BASE}/text-to-image"
    # Only include parameters the caller actually supplied, so the server's
    # defaults take effect for everything else.
    candidates = {
        "prompt": prompt,
        "model": model,
        "negative_prompt": negative_prompt,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
    }
    data = {key: value for key, value in candidates.items() if value is not None}
    # timeout= prevents the request from blocking indefinitely if the
    # server stalls.
    response = requests.post(url, data=data, timeout=timeout)
    if response.status_code == 200:
        # The endpoint returns raw image bytes; decode them into a PIL image.
        return Image.open(io.BytesIO(response.content))
    print(f"Error: {response.status_code}")
    print(response.text)
    return None
def image_to_image(image_path, prompt=None, model=None, negative_prompt=None,
                   guidance_scale=None, num_inference_steps=None, use_controlnet=False,
                   timeout=300):
    """
    Transform a local image via the API's /image-to-image endpoint.

    Only image_path is required; other generation parameters are optional
    and, when None, are omitted so the server applies its own defaults.

    Args:
        image_path: Path to the input image file to upload.
        prompt: Text description guiding the transformation.
        model: Model identifier understood by the server.
        negative_prompt: Features to steer the generation away from.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        use_controlnet: If True, ask the server for ControlNet-guided
            transformation (sent as the form string "True").
        timeout: Seconds to wait for the HTTP response. Previously no
            timeout was set and the client could hang forever.

    Returns:
        A PIL.Image.Image on success, or None on any non-200 response
        (the status code and body are printed for debugging).
    """
    url = f"{API_BASE}/image-to-image"
    # Only include parameters the caller actually supplied.
    candidates = {
        "prompt": prompt,
        "model": model,
        "negative_prompt": negative_prompt,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
    }
    data = {key: value for key, value in candidates.items() if value is not None}
    if use_controlnet:
        # Server expects the flag as a form-field string.
        data["use_controlnet"] = "True"
    # Open the upload inside a context manager so the file handle is always
    # closed — the original version leaked it.
    with open(image_path, "rb") as image_file:
        files = {"image": image_file}
        response = requests.post(url, data=data, files=files, timeout=timeout)
    if response.status_code == 200:
        # The endpoint returns raw image bytes; decode them into a PIL image.
        return Image.open(io.BytesIO(response.content))
    print(f"Error: {response.status_code}")
    print(response.text)
    return None
if __name__ == "__main__":
    # --- Demo: text-to-image -------------------------------------------
    print("Text to Image example:")
    # Calling with no arguments lets the server choose every default.
    generated = text_to_image()
    if generated:
        generated.save("text2img_output.png")
        print("Image saved as text2img_output.png")
    # A prompt can be supplied explicitly instead, e.g.:
    # generated = text_to_image("A beautiful mountain landscape at sunset")

    # --- Demo: image-to-image (needs a local input file) ---------------
    print("Image to Image example (requires an input image):")
    # Uncomment and adjust the path to try it:
    # result = image_to_image("input.png")  # Uses default prompt
    # if result:
    #     result.save("img2img_output.png")
    #     print("Image saved as img2img_output.png")
    # Depth-guided ControlNet variant:
    # result = image_to_image("input.png", prompt="A futuristic cityscape", use_controlnet=True)
    # if result:
    #     result.save("controlnet_output.png")
    #     print("Image saved as controlnet_output.png")