from huggingface_hub import InferenceClient
from PIL import Image
import gradio as gr
import os

# Load token from environment
token = os.environ["HF_TOKEN"]

# Create the client
client = InferenceClient(
    model="artificialguybr/TshirtDesignRedmond-V2",
    provider="fal-ai",
    token=token,
)

# Trigger word for model
trigger_word = "T shirt design, TshirtDesignAF, "


def generate_image(dress_type, fabric_type, color_type, design):
    base_description = (
        f"Hyper-realistic scene of a single {color_type} {dress_type} "
        f"made of {fabric_type} with {design} printed on it, hanging neatly on a wooden hanger "
        f"against a clean matte concrete wall. The {dress_type} is centered, casting a soft natural shadow, "
        f"detailed fabric folds visible, studio lighting setup with soft diffused highlights, "
        f"4K resolution, shallow depth of field, ultra-clean minimalist aesthetic, fashion editorial style."
    )
    full_prompt = f"{base_description} {trigger_word}"
    print("Generating image with:", full_prompt)

    image = client.text_to_image(
        prompt=full_prompt,
        negative_prompt="(worst quality, low quality, lowres, bad photo, ...)",
        num_inference_steps=30,
        guidance_scale=7.5,
    )
    return image


iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Dress Type"),
        gr.Textbox(label="Fabric Type"),
        gr.Textbox(label="Color Type"),
        gr.Textbox(label="Design"),
    ],
    outputs="image",
    title="TShirt Design XL Image Generator",
    description="Powered by Redmond.AI — Generate stunning T-shirt designs from simple attributes.",
)

print("Launching Gradio interface...")
iface.launch()