"""Gradio front-end for browsing LoRA models and running the selected one
through the Hugging Face Inference API.

Reads the LoRA catalogue from ``loras.json`` (expected next to this script)
and exposes two event handlers: ``update_selection`` (gallery click) and
``run_lora`` (generate button).
"""

# Existing imports
import io
import json
import logging
import os

import gradio as gr
import requests
from PIL import Image

# Initialize logging: module-level logger rather than the root logger.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Load LoRAs from JSON once at import time.
with open('loras.json', 'r', encoding='utf-8') as f:
    loras = json.load(f)


def _selected_index(selected_state):
    """Return the selected gallery index.

    gr.SelectData carries the index as an attribute (``.index``); it is not
    subscriptable, so ``selected_state['index']`` raises TypeError. Fall back
    to dict access for backward compatibility with plain-dict state.
    """
    if hasattr(selected_state, "index"):
        return selected_state.index
    return selected_state["index"]


def update_selection(selected_state: gr.SelectData):
    """Handle a gallery selection and surface the chosen LoRA's metadata.

    Returns a 6-tuple matching the outputs wired in the Blocks UI:
    (title, instance prompt, prompt-box update, state, diffusers snippet,
    other-UIs snippet).
    """
    logger.debug("Inside update_selection, selected_state: %s", selected_state)
    # BUG FIX: was selected_state['index'] — SelectData is not subscriptable.
    selected_lora_index = _selected_index(selected_state)
    updated_text = loras[selected_lora_index]['title']
    instance_prompt = "Your custom instance prompt here"
    new_placeholder = "Your new placeholder here"
    use_with_diffusers = "Your use_with_diffusers here"
    use_with_uis = "Your use_with_uis here"
    logger.debug("Updated selected_state: %s", selected_state)
    return (
        updated_text,
        instance_prompt,
        gr.update(placeholder=new_placeholder),
        selected_state,
        use_with_diffusers,
        use_with_uis,
    )


def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
    """Run *prompt* through the selected LoRA via the HF Inference API.

    Raises:
        gr.Error: when no LoRA is selected, or the API call fails.

    Returns:
        PIL.Image.Image: the generated image.
    """
    logger.debug("Inside run_lora, selected_state: %s", selected_state)
    if not selected_state:
        logger.error("selected_state is None or empty.")
        raise gr.Error("You must select a LoRA")
    # BUG FIX: was selected_state['index'] — SelectData is not subscriptable.
    selected_lora_index = _selected_index(selected_state)
    selected_lora = loras[selected_lora_index]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    token = os.getenv("API_TOKEN")
    payload = {"inputs": f"{prompt} {trigger_word}"}
    # NOTE(review): the source was truncated after building the payload —
    # the visible code never sent the request. The call below is the standard
    # HF Inference API pattern implied by the otherwise-unused requests/io/
    # Image imports; confirm against the original file.
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    response = requests.post(api_url, headers=headers, json=payload)
    if response.status_code != 200:
        logger.error(
            "Inference API returned %s: %s", response.status_code, response.text
        )
        raise gr.Error("The inference API request failed")
    return Image.open(io.BytesIO(response.content))


# Gradio UI
with gr.Blocks(css="custom.css") as app:
    # NOTE(review): the source was truncated mid-statement here
    # ('title = gr.HTML("'); the HTML content and the rest of the UI wiring
    # were lost in flattening. The placeholder below keeps the module
    # importable — restore the real UI from the original file.
    title = gr.HTML("")