Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -725,733 +725,5 @@ with gr.Blocks(css=css) as demo:
         outputs=[gallery, gr_flux_loras]
     )
 
-demo.queue(default_concurrency_limit=None)
-demo.launch(allowed_paths=["examples/"])import gradio as gr
-import numpy as np
-import spaces
-import torch
-import random
-import json
-import os
-from PIL import Image
-from diffusers import FluxKontextPipeline
-from diffusers.utils import load_image
-from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, list_repo_files
-from safetensors.torch import load_file
-import requests
-import re
-
-# Load Kontext model
-MAX_SEED = np.iinfo(np.int32).max
-
-pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
-
-# Load LoRA data
-flux_loras_raw = [
-    {
-        "image": "examples/1.png",
-        "title": "Studio Ghibli",
-        "repo": "openfree/flux-chatgpt-ghibli-lora",
-        "trigger_word": "ghibli",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    },
-    {
-        "image": "examples/2.png",
-        "title": "Winslow Homer",
-        "repo": "openfree/winslow-homer",
-        "trigger_word": "homer",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    },
-    {
-        "image": "examples/3.png",
-        "title": "Van Gogh",
-        "repo": "openfree/van-gogh",
-        "trigger_word": "gogh",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    },
-    {
-        "image": "examples/4.png",
-        "title": "Paul Cézanne",
-        "repo": "openfree/paul-cezanne",
-        "trigger_word": "Cezanne",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    },
-    {
-        "image": "examples/5.png",
-        "title": "Renoir",
-        "repo": "openfree/pierre-auguste-renoir",
-        "trigger_word": "Renoir",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    },
-    {
-        "image": "examples/6.png",
-        "title": "Claude Monet",
-        "repo": "openfree/claude-monet",
-        "trigger_word": "claude monet",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    },
-    {
-        "image": "examples/7.png",
-        "title": "Fantasy Art",
-        "repo": "openfree/myt-flux-fantasy",
-        "trigger_word": "fantasy",
-        "weights": "pytorch_lora_weights.safetensors",
-        "likes": 0
-    }
-]
-print(f"Loaded {len(flux_loras_raw)} LoRAs")
-# Global variables for LoRA management
-current_lora = None
-lora_cache = {}
-
-def load_lora_weights(repo_id, weights_filename):
-    """Load LoRA weights from HuggingFace"""
-    try:
-        # First try with the specified filename
-        try:
-            lora_path = hf_hub_download(repo_id=repo_id, filename=weights_filename)
-            if repo_id not in lora_cache:
-                lora_cache[repo_id] = lora_path
-            return lora_path
-        except Exception as e:
-            print(f"Failed to load {weights_filename}, trying to find alternative LoRA files...")
-
-            # If the specified file doesn't exist, try to find any .safetensors file
-            from huggingface_hub import list_repo_files
-            try:
-                files = list_repo_files(repo_id)
-                safetensors_files = [f for f in files if f.endswith(('.safetensors', '.bin')) and 'lora' in f.lower()]
-
-                if not safetensors_files:
-                    # Try without 'lora' in filename
-                    safetensors_files = [f for f in files if f.endswith('.safetensors')]
-
-                if safetensors_files:
-                    # Try the first available file
-                    for file in safetensors_files:
-                        try:
-                            print(f"Trying alternative file: {file}")
-                            lora_path = hf_hub_download(repo_id=repo_id, filename=file)
-                            if repo_id not in lora_cache:
-                                lora_cache[repo_id] = lora_path
-                            print(f"Successfully loaded alternative LoRA file: {file}")
-                            return lora_path
-                        except:
-                            continue
-
-                print(f"No suitable LoRA files found in {repo_id}")
-                return None
-
-            except Exception as list_error:
-                print(f"Error listing files in repo {repo_id}: {list_error}")
-                return None
-
-    except Exception as e:
-        print(f"Error loading LoRA from {repo_id}: {e}")
-        return None
-
-def update_selection(selected_state: gr.SelectData, flux_loras):
-    """Update UI when a LoRA is selected"""
-    if selected_state.index >= len(flux_loras):
-        return "### No LoRA selected", gr.update(), None
-
-    lora = flux_loras[selected_state.index]
-    lora_title = lora["title"]
-    lora_repo = lora["repo"]
-    trigger_word = lora["trigger_word"]
-
-    # Create a more informative selected text
-    updated_text = f"### 🎨 Selected Style: {lora_title}"
-    new_placeholder = f"Describe additional details, e.g., 'wearing a red hat' or 'smiling'"
-
-    return updated_text, gr.update(placeholder=new_placeholder), selected_state.index
-
-def get_huggingface_lora(link):
-    """Download LoRA from HuggingFace link"""
-    split_link = link.split("/")
-    if len(split_link) == 2:
-        try:
-            model_card = ModelCard.load(link)
-            trigger_word = model_card.data.get("instance_prompt", "")
-
-            # Try to find the correct safetensors file
-            files = list_repo_files(link)
-            safetensors_files = [f for f in files if f.endswith('.safetensors')]
-
-            # Prioritize files with 'lora' in the name
-            lora_files = [f for f in safetensors_files if 'lora' in f.lower()]
-            if lora_files:
-                safetensors_file = lora_files[0]
-            elif safetensors_files:
-                safetensors_file = safetensors_files[0]
-            else:
-                # Try .bin files as fallback
-                bin_files = [f for f in files if f.endswith('.bin') and 'lora' in f.lower()]
-                if bin_files:
-                    safetensors_file = bin_files[0]
-                else:
-                    safetensors_file = "pytorch_lora_weights.safetensors" # Default fallback
-
-            print(f"Found LoRA file: {safetensors_file} in {link}")
-            return split_link[1], safetensors_file, trigger_word
-
-        except Exception as e:
-            print(f"Error in get_huggingface_lora: {e}")
-            # Try basic detection
-            try:
-                files = list_repo_files(link)
-                safetensors_file = next((f for f in files if f.endswith('.safetensors')), "pytorch_lora_weights.safetensors")
-                return split_link[1], safetensors_file, ""
-            except:
-                raise Exception(f"Error loading LoRA: {e}")
-    else:
-        raise Exception("Invalid HuggingFace repository format")
-
-def load_custom_lora(link):
-    """Load custom LoRA from user input"""
-    if not link:
-        return gr.update(visible=False), "", gr.update(visible=False), None, gr.Gallery(selected_index=None), "### 🎨 Select an art style from the gallery", None
-
-    try:
-        repo_name, weights_file, trigger_word = get_huggingface_lora(link)
-
-        card = f'''
-        <div class="custom_lora_card">
-            <div style="display: flex; align-items: center; margin-bottom: 12px;">
-                <span style="font-size: 18px; margin-right: 8px;">✅</span>
-                <strong style="font-size: 16px;">Custom LoRA Loaded!</strong>
-            </div>
-            <div style="background: rgba(255, 255, 255, 0.8); padding: 12px; border-radius: 8px;">
-                <h4 style="margin: 0 0 8px 0; color: #333;">{repo_name}</h4>
-                <small style="color: #666;">{"Trigger: <code style='background: #f0f0f0; padding: 2px 6px; border-radius: 4px;'><b>"+trigger_word+"</b></code>" if trigger_word else "No trigger word found"}</small>
-            </div>
-        </div>
-        '''
-
-        custom_lora_data = {
-            "repo": link,
-            "weights": weights_file,
-            "trigger_word": trigger_word
-        }
-
-        return gr.update(visible=True), card, gr.update(visible=True), custom_lora_data, gr.Gallery(selected_index=None), f"🎨 Custom Style: {repo_name}", None
-
-    except Exception as e:
-        return gr.update(visible=True), f"Error: {str(e)}", gr.update(visible=False), None, gr.update(), "### 🎨 Select an art style from the gallery", None
-
-def remove_custom_lora():
-    """Remove custom LoRA"""
-    return "", gr.update(visible=False), gr.update(visible=False), None, None
-
-def classify_gallery(flux_loras):
-    """Sort gallery by likes"""
-    try:
-        sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
-        gallery_items = []
-
-        for item in sorted_gallery:
-            if "image" in item and "title" in item:
-                image_url = item["image"]
-                title = item["title"]
-
-                # Try to load local images with PIL
-                if isinstance(image_url, str) and image_url.startswith("examples/"):
-                    try:
-                        import os
-                        # Try different possible paths
-                        possible_paths = [
-                            image_url,
-                            os.path.join(os.getcwd(), image_url),
-                            f"/home/user/app/{image_url}"
-                        ]
-
-                        image_loaded = False
-                        for path in possible_paths:
-                            if os.path.exists(path):
-                                try:
-                                    pil_image = Image.open(path)
-                                    gallery_items.append((pil_image, title))
-                                    image_loaded = True
-                                    print(f"✓ Successfully loaded image from: {path}")
-                                    break
-                                except Exception as e:
-                                    print(f"Failed to open image at {path}: {e}")
-
-                        if not image_loaded:
-                            print(f"✗ Could not load image: {image_url}")
-                            # Use the original path as fallback
-                            gallery_items.append((image_url, title))
-                    except Exception as e:
-                        print(f"Error processing image {image_url}: {e}")
-                        gallery_items.append((image_url, title))
-                else:
-                    # For URLs or other paths, use as-is
-                    gallery_items.append((image_url, title))
-
-        if not gallery_items:
-            print("No gallery items found")
-            return [], sorted_gallery
-
-        print(f"Gallery loaded with {len(gallery_items)} items")
-        return gallery_items, sorted_gallery
-    except Exception as e:
-        print(f"Error in classify_gallery: {e}")
-        import traceback
-        traceback.print_exc()
-        return [], []
-
-def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
-    """Wrapper function to handle state serialization"""
-    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, flux_loras, progress)
-
-@spaces.GPU
-def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
-    """Generate image with selected LoRA"""
-    global current_lora, pipe
-
-    # Check if input image is provided
-    if input_image is None:
-        gr.Warning("Please upload your portrait photo first! 📸")
-        return None, seed, gr.update(visible=False)
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    # Determine which LoRA to use
-    lora_to_use = None
-    if custom_lora:
-        lora_to_use = custom_lora
-    elif selected_index is not None and flux_loras and selected_index < len(flux_loras):
-        lora_to_use = flux_loras[selected_index]
-    # Load LoRA if needed
-    if lora_to_use and lora_to_use != current_lora:
-        try:
-            # Unload current LoRA
-            if current_lora:
-                pipe.unload_lora_weights()
-                print(f"Unloaded previous LoRA")
-
-            # Load new LoRA
-            repo_id = lora_to_use.get("repo", "unknown")
-            weights_file = lora_to_use.get("weights", "pytorch_lora_weights.safetensors")
-            print(f"Loading LoRA: {repo_id} with weights: {weights_file}")
-
-            lora_path = load_lora_weights(repo_id, weights_file)
-            if lora_path:
-                pipe.load_lora_weights(lora_path, adapter_name="selected_lora")
-                pipe.set_adapters(["selected_lora"], adapter_weights=[lora_scale])
-                print(f"Successfully loaded: {lora_path} with scale {lora_scale}")
-                current_lora = lora_to_use
-            else:
-                print(f"Failed to load LoRA from {repo_id}")
-                gr.Warning(f"Failed to load {lora_to_use.get('title', 'style')}. Please try a different art style.")
-                return None, seed, gr.update(visible=False)
-
-        except Exception as e:
-            print(f"Error loading LoRA: {e}")
-            # Continue without LoRA
-    else:
-        if lora_to_use:
-            print(f"Using already loaded LoRA: {lora_to_use.get('repo', 'unknown')}")
-
-    try:
-        # Convert image to RGB
-        input_image = input_image.convert("RGB")
-    except Exception as e:
-        print(f"Error processing image: {e}")
-        gr.Warning("Error processing the uploaded image. Please try a different photo. 📸")
-        return None, seed, gr.update(visible=False)
-
-    # Check if LoRA is selected
-    if lora_to_use is None:
-        gr.Warning("Please select an art style from the gallery first! 🎨")
-        return None, seed, gr.update(visible=False)
-
-    # Add trigger word to prompt
-    trigger_word = lora_to_use.get("trigger_word", "")
-
-    # Special handling for different art styles
-    if trigger_word == "ghibli":
-        prompt = f"Create a Studio Ghibli anime style portrait of the person in the photo, {prompt}. Maintain the facial identity while transforming into whimsical anime art style."
-    elif trigger_word == "homer":
-        prompt = f"Paint the person in Winslow Homer's American realist style, {prompt}. Keep facial features while applying watercolor and marine art techniques."
-    elif trigger_word == "gogh":
-        prompt = f"Transform the portrait into Van Gogh's post-impressionist style with swirling brushstrokes, {prompt}. Maintain facial identity with expressive colors."
-    elif trigger_word == "Cezanne":
-        prompt = f"Render the person in Paul Cézanne's geometric post-impressionist style, {prompt}. Keep facial structure while applying structured brushwork."
-    elif trigger_word == "Renoir":
-        prompt = f"Paint the portrait in Pierre-Auguste Renoir's impressionist style with soft light, {prompt}. Maintain identity with luminous skin tones."
-    elif trigger_word == "claude monet":
-        prompt = f"Create an impressionist portrait in Claude Monet's style with visible brushstrokes, {prompt}. Keep facial features while using light and color."
-    elif trigger_word == "fantasy":
-        prompt = f"Transform into an epic fantasy character portrait, {prompt}. Maintain facial identity while adding magical and fantastical elements."
-    elif trigger_word == ", How2Draw":
-        prompt = f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
-    elif trigger_word == ", video game screenshot in the style of THSMS":
-        prompt = f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
-    else:
-        prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
-
-    try:
-        image = pipe(
-            image=input_image,
-            prompt=prompt,
-            guidance_scale=guidance_scale,
-            generator=torch.Generator().manual_seed(seed),
-        ).images[0]
-
-        return image, seed, gr.update(visible=True)
-
-    except Exception as e:
-        print(f"Error during inference: {e}")
-        return None, seed, gr.update(visible=False)
-
-# CSS styling with beautiful gradient pastel design
-css = """
-/* Global background and container styling */
-.gradio-container {
-    background: linear-gradient(135deg, #ffeef8 0%, #e6f3ff 25%, #fff4e6 50%, #f0e6ff 75%, #e6fff9 100%);
-    font-family: 'Inter', sans-serif;
-}
-
-/* Main app container */
-#main_app {
-    display: flex;
-    gap: 24px;
-    padding: 20px;
-    background: rgba(255, 255, 255, 0.85);
-    backdrop-filter: blur(20px);
-    border-radius: 24px;
-    box-shadow: 0 10px 40px rgba(0, 0, 0, 0.08);
-}
-
-/* Box column styling */
-#box_column {
-    min-width: 400px;
-}
-
-/* Gallery box with glassmorphism */
-#gallery_box {
-    background: linear-gradient(135deg, rgba(255, 255, 255, 0.9) 0%, rgba(240, 248, 255, 0.9) 100%);
-    border-radius: 20px;
-    padding: 20px;
-    box-shadow: 0 8px 32px rgba(135, 206, 250, 0.2);
-    border: 1px solid rgba(255, 255, 255, 0.8);
-}
-
-/* Input image styling */
-.image-container {
-    border-radius: 16px;
-    overflow: hidden;
-    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
-}
-
-/* Gallery styling */
-#gallery {
-    overflow-y: scroll !important;
-    max-height: 400px;
-    padding: 12px;
-    background: rgba(255, 255, 255, 0.5);
-    border-radius: 16px;
-    scrollbar-width: thin;
-    scrollbar-color: #ddd6fe #f5f3ff;
-}
-
-#gallery::-webkit-scrollbar {
-    width: 8px;
-}
-
-#gallery::-webkit-scrollbar-track {
-    background: #f5f3ff;
-    border-radius: 10px;
-}
-
-#gallery::-webkit-scrollbar-thumb {
-    background: linear-gradient(180deg, #c7d2fe 0%, #ddd6fe 100%);
-    border-radius: 10px;
-}
-
-/* Selected LoRA text */
-#selected_lora {
-    background: linear-gradient(135deg, #818cf8 0%, #a78bfa 100%);
-    -webkit-background-clip: text;
-    -webkit-text-fill-color: transparent;
-    background-clip: text;
-    font-weight: 700;
-    font-size: 18px;
-    text-align: center;
-    padding: 12px;
-    margin-bottom: 16px;
-}
-
-/* Prompt input field */
-#prompt {
-    flex-grow: 1;
-    border: 2px solid transparent;
-    background: linear-gradient(white, white) padding-box,
-                linear-gradient(135deg, #a5b4fc 0%, #e9d5ff 100%) border-box;
-    border-radius: 12px;
-    padding: 12px 16px;
-    font-size: 16px;
-    transition: all 0.3s ease;
-}
-
-#prompt:focus {
-    box-shadow: 0 0 0 4px rgba(165, 180, 252, 0.25);
-}
-
-/* Run button with animated gradient */
-#run_button {
-    background: linear-gradient(135deg, #a78bfa 0%, #818cf8 25%, #60a5fa 50%, #34d399 75%, #fbbf24 100%);
-    background-size: 200% 200%;
-    animation: gradient-shift 3s ease infinite;
-    color: white;
-    border: none;
-    padding: 12px 32px;
-    border-radius: 12px;
-    font-weight: 600;
-    font-size: 16px;
-    cursor: pointer;
-    transition: all 0.3s ease;
-    box-shadow: 0 4px 20px rgba(167, 139, 250, 0.4);
-}
-
-#run_button:hover {
-    transform: translateY(-2px);
-    box-shadow: 0 6px 30px rgba(167, 139, 250, 0.6);
-}
-
-@keyframes gradient-shift {
-    0% { background-position: 0% 50%; }
-    50% { background-position: 100% 50%; }
-    100% { background-position: 0% 50%; }
-}
-
-/* Custom LoRA card */
-.custom_lora_card {
-    background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
-    border: 1px solid #fcd34d;
-    border-radius: 12px;
-    padding: 16px;
-    margin: 12px 0;
-    box-shadow: 0 4px 12px rgba(251, 191, 36, 0.2);
-}
-
-/* Result image container */
-.output-image {
-    border-radius: 16px;
-    overflow: hidden;
-    box-shadow: 0 8px 32px rgba(0, 0, 0, 0.12);
-    margin-top: 20px;
-}
-
-/* Accordion styling */
-.accordion {
-    background: rgba(249, 250, 251, 0.9);
-    border-radius: 12px;
-    border: 1px solid rgba(229, 231, 235, 0.8);
-    margin-top: 16px;
-}
-
-/* Slider styling */
-.slider-container {
-    padding: 8px 0;
-}
-
-input[type="range"] {
-    background: linear-gradient(to right, #e0e7ff 0%, #c7d2fe 100%);
-    border-radius: 8px;
-    height: 6px;
-}
-
-/* Reuse button */
-button:not(#run_button) {
-    background: linear-gradient(135deg, #f0abfc 0%, #c084fc 100%);
-    color: white;
-    border: none;
-    padding: 8px 20px;
-    border-radius: 8px;
-    font-weight: 500;
-    cursor: pointer;
-    transition: all 0.3s ease;
-}
-
-button:not(#run_button):hover {
-    transform: translateY(-1px);
-    box-shadow: 0 4px 16px rgba(192, 132, 252, 0.4);
-}
-
-/* Title styling */
-h1 {
-    background: linear-gradient(135deg, #6366f1 0%, #a855f7 25%, #ec4899 50%, #f43f5e 75%, #f59e0b 100%);
-    -webkit-background-clip: text;
-    -webkit-text-fill-color: transparent;
-    background-clip: text;
-    text-align: center;
-    font-size: 3.5rem;
-    font-weight: 800;
-    margin-bottom: 8px;
-    text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
-}
-
-h1 small {
-    display: block;
-    background: linear-gradient(135deg, #94a3b8 0%, #64748b 100%);
-    -webkit-background-clip: text;
-    -webkit-text-fill-color: transparent;
-    background-clip: text;
-    font-size: 1rem;
-    font-weight: 500;
-    margin-top: 8px;
-}
-
-/* Checkbox styling */
-input[type="checkbox"] {
-    accent-color: #8b5cf6;
-}
-
-/* Label styling */
-label {
-    color: #4b5563;
-    font-weight: 500;
-}
-
-/* Group containers */
-.gr-group {
-    background: rgba(255, 255, 255, 0.7);
-    border-radius: 16px;
-    padding: 20px;
-    border: 1px solid rgba(255, 255, 255, 0.9);
-    box-shadow: 0 4px 16px rgba(0, 0, 0, 0.05);
-}
-"""
-
-# Create Gradio interface
-with gr.Blocks(css=css) as demo:
-    gr_flux_loras = gr.State(value=flux_loras_raw)
-
-    title = gr.HTML(
-        """<h1>✨ Flux-Kontext FaceLORA
-        <small>Transform your portraits with AI-powered style transfer 🎨</small></h1>""",
-    )
-
-    selected_state = gr.State(value=None)
-    custom_loaded_lora = gr.State(value=None)
-
-    with gr.Row(elem_id="main_app"):
-        with gr.Column(scale=4, elem_id="box_column"):
-            with gr.Group(elem_id="gallery_box"):
-                input_image = gr.Image(label="Upload your portrait photo 📸", type="pil", height=300)
-
-                gallery = gr.Gallery(
-                    label="Choose Your Art Style",
-                    allow_preview=False,
-                    columns=3,
-                    elem_id="gallery",
-                    show_share_button=False,
-                    height=400
-                )
-
-                custom_model = gr.Textbox(
-                    label="🔗 Or use a custom LoRA from HuggingFace",
-                    placeholder="e.g., username/lora-name",
-                    visible=True
-                )
-                custom_model_card = gr.HTML(visible=False)
-                custom_model_button = gr.Button("❌ Remove custom LoRA", visible=False)
-
-        with gr.Column(scale=5):
-            with gr.Row():
-                prompt = gr.Textbox(
-                    label="Additional Details (optional)",
-                    show_label=False,
-                    lines=1,
-                    max_lines=1,
-                    placeholder="Describe additional details, e.g., 'wearing a red hat' or 'smiling'",
-                    elem_id="prompt"
-                )
-                run_button = gr.Button("Generate ✨", elem_id="run_button")
-
-            result = gr.Image(label="Your Artistic Portrait", interactive=False)
-            reuse_button = gr.Button("🔄 Reuse this image", visible=False)
-
-            with gr.Accordion("⚙️ Advanced Settings", open=False):
-                lora_scale = gr.Slider(
-                    label="Style Strength",
-                    minimum=0,
-                    maximum=2,
-                    step=0.1,
-                    value=1.0,
-                    info="How strongly to apply the art style (1.0 = balanced)"
-                )
-                seed = gr.Slider(
-                    label="Random Seed",
-                    minimum=0,
-                    maximum=MAX_SEED,
-                    step=1,
-                    value=0,
-                    info="Set to 0 for random results"
-                )
-                randomize_seed = gr.Checkbox(label="🎲 Randomize seed for each generation", value=True)
-                guidance_scale = gr.Slider(
-                    label="Image Guidance",
-                    minimum=1,
-                    maximum=10,
-                    step=0.1,
-                    value=2.5,
-                    info="How closely to follow the input image (lower = more creative)"
-                )
-
-    prompt_title = gr.Markdown(
-        value="### 🎨 Select an art style from the gallery",
-        visible=True,
-        elem_id="selected_lora",
-    )
-
-    # Event handlers
-    custom_model.input(
-        fn=load_custom_lora,
-        inputs=[custom_model],
-        outputs=[custom_model_card, custom_model_card, custom_model_button, custom_loaded_lora, gallery, prompt_title, selected_state],
-    )
-
-    custom_model_button.click(
-        fn=remove_custom_lora,
-        outputs=[custom_model, custom_model_button, custom_model_card, custom_loaded_lora, selected_state]
-    )
-
-    gallery.select(
-        fn=update_selection,
-        inputs=[gr_flux_loras],
-        outputs=[prompt_title, prompt, selected_state],
-        show_progress=False
-    )
-
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer_with_lora_wrapper,
-        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, gr_flux_loras],
-        outputs=[result, seed, reuse_button]
-    )
-
-    reuse_button.click(
-        fn=lambda image: image,
-        inputs=[result],
-        outputs=[input_image]
-    )
-
-    # Initialize gallery
-    demo.load(
-        fn=classify_gallery,
-        inputs=[gr_flux_loras],
-        outputs=[gallery, gr_flux_loras]
-    )
-
 demo.queue(default_concurrency_limit=None)
 demo.launch(allowed_paths=["examples/"])