import io
import os

import requests
from dotenv import load_dotenv
from PIL import Image

# Load environment variables from a .env file
load_dotenv()

# Hugging Face API token (needs to be set in .env or the environment).
# It is read here but this client does not attach it to any request itself.
HF_TOKEN = os.getenv("HF_TOKEN")

# Base URL of the local image-generation API
API_BASE = "http://localhost:8000"
def text_to_image(prompt=None, model=None, negative_prompt=None,
                  guidance_scale=None, num_inference_steps=None):
    """
    Generate an image from text using the API.

    All parameters are optional; any that are omitted fall back to the
    server-side defaults.
    """
    url = f"{API_BASE}/text-to-image"

    # Prepare form data - only include parameters that were provided
    # (otherwise the server defaults apply)
    data = {}
    if prompt is not None:
        data["prompt"] = prompt
    if model is not None:
        data["model"] = model
    if negative_prompt is not None:
        data["negative_prompt"] = negative_prompt
    if guidance_scale is not None:
        data["guidance_scale"] = guidance_scale
    if num_inference_steps is not None:
        data["num_inference_steps"] = num_inference_steps

    # Make the API request
    response = requests.post(url, data=data)

    if response.status_code == 200:
        # Convert the response bytes to a PIL image
        image = Image.open(io.BytesIO(response.content))
        return image
    else:
        print(f"Error: {response.status_code}")
        print(response.text)
        return None
def image_to_image(image_path, prompt=None, model=None, negative_prompt=None,
                   guidance_scale=None, num_inference_steps=None, use_controlnet=False):
    """
    Transform an existing image using the API.

    Only image_path is required; the other parameters are optional and fall
    back to the server-side defaults.
    """
    url = f"{API_BASE}/image-to-image"

    # Prepare form data - only include parameters that were provided
    data = {}
    if prompt is not None:
        data["prompt"] = prompt
    if model is not None:
        data["model"] = model
    if negative_prompt is not None:
        data["negative_prompt"] = negative_prompt
    if guidance_scale is not None:
        data["guidance_scale"] = guidance_scale
    if num_inference_steps is not None:
        data["num_inference_steps"] = num_inference_steps
    if use_controlnet:
        data["use_controlnet"] = "True"

    # Open the input image and send the multipart request; the context
    # manager ensures the file handle is closed after the upload.
    with open(image_path, "rb") as image_file:
        files = {"image": image_file}
        response = requests.post(url, data=data, files=files)

    if response.status_code == 200:
        # Convert the response bytes to a PIL image
        image = Image.open(io.BytesIO(response.content))
        return image
    else:
        print(f"Error: {response.status_code}")
        print(response.text)
        return None
if __name__ == "__main__":
    # Example usage
    print("Text to Image example:")

    # Calling without arguments uses the server defaults
    image = text_to_image()
    if image:
        image.save("text2img_output.png")
        print("Image saved as text2img_output.png")

    # Or with a specific prompt:
    # image = text_to_image("A beautiful mountain landscape at sunset")
print("Image to Image example (requires an input image):")
# Uncomment and modify path to run:
# result = image_to_image("input.png") # Uses default prompt
# if result:
# result.save("img2img_output.png")
# print("Image saved as img2img_output.png")
# Example with ControlNet depth-based transformation:
# result = image_to_image("input.png", prompt="A futuristic cityscape", use_controlnet=True)
# if result:
# result.save("controlnet_output.png")
# print("Image saved as controlnet_output.png")