import io
import subprocess
import sys

import streamlit as st
import torch
from PIL import Image

# Set page config
st.set_page_config(
    page_title="Portrait Generator",
    page_icon="🖼️",
    layout="centered"
)

# App title and description
st.title("AI Portrait Generator")
st.markdown("Generate beautiful portraits using the AWPortraitCN2 model")

# Check and install compatible versions if needed
@st.cache_resource
def install_dependencies():
    try:
        # Try to import diffusers to see if it is already available
        import diffusers  # noqa: F401
        return True
    except ImportError:
        pass

    try:
        st.warning("Installing required packages. This may take a few minutes...")
        # Install specific versions known to work together
        subprocess.check_call([
            sys.executable, "-m", "pip", "install",
            "huggingface-hub==0.16.4",
            "diffusers==0.20.0",
            "transformers==4.32.0",
            "accelerate==0.21.0"
        ])
        return True
    except Exception as e:
        st.error(f"Failed to install dependencies: {e}")
        return False

# Try to install compatible dependencies
dependencies_installed = install_dependencies()

# If dependency installation failed, show a message and exit
if not dependencies_installed:
    st.error("Could not set up the required environment. Please check the logs.")
    st.stop()

# Model parameters
with st.sidebar:
    st.header("Generation Settings")
    steps = st.slider("Inference Steps", min_value=20, max_value=100, value=40)
    guidance_scale = st.slider("Guidance Scale", min_value=1.0, max_value=15.0, value=7.5, step=0.5)
    negative_prompt = st.text_area(
        "Negative Prompt",
        value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, watermark, signature, out of frame"
    )
    seed = st.number_input("Random Seed (leave at -1 for random)", min_value=-1, value=-1)

# Main prompt input
prompt = st.text_area(
    "Describe the portrait you want to generate",
    value="Masterpiece portrait of a beautiful young woman with flowing hair, detailed face, photorealistic, 8k, professional photography"
)

# Function to load the model with the pinned dependencies
@st.cache_resource
def load_model():
    try:
        from diffusers import StableDiffusionPipeline

        pipeline = StableDiffusionPipeline.from_pretrained(
            "Shakker-Labs/AWPortraitCN2",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            use_safetensors=True
        )

        # Move to GPU if available
        device = "cuda" if torch.cuda.is_available() else "cpu"
        pipeline = pipeline.to(device)

        return pipeline
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None

# Generate button
if st.button("Generate Portrait", type="primary"):
    with st.spinner("Loading model and generating portrait..."):
        try:
            # Load the model
            pipeline = load_model()
            if pipeline is None:
                st.error("Failed to load the model. Please check the logs.")
                st.stop()

            # Set the seed if one was specified
            generator = None
            if seed != -1:
                device = "cuda" if torch.cuda.is_available() else "cpu"
                generator = torch.Generator(device=device).manual_seed(seed)

            # Generate the image
            image = pipeline(
                prompt=prompt,
                negative_prompt=negative_prompt,
                num_inference_steps=steps,
                guidance_scale=guidance_scale,
                generator=generator
            ).images[0]

            # Display the generated image
            st.image(image, caption="Generated Portrait", use_column_width=True)

            # Offer a download of the result
            buf = io.BytesIO()
            image.save(buf, format="PNG")
            byte_im = buf.getvalue()

            st.download_button(
                label="Download Portrait",
                data=byte_im,
                file_name="generated_portrait.png",
                mime="image/png"
            )
        except Exception as e:
            st.error(f"An error occurred: {e}")
            st.info("Make sure you have enough GPU memory and the required dependencies installed.")

# Add requirements info at the bottom
st.markdown("---")
st.markdown("""
### About This App
This app uses the AWPortraitCN2 model to generate AI portraits based on your text prompts.
Adjust the settings in the sidebar to customize your generation.
""")