	Update app.py
app.py CHANGED

@@ -14,7 +14,7 @@ if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
@@ -57,7 +57,7 @@ def generate(
     if style=="BEST" :
         pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle2")
         pipe.load_lora_weights("ehristoforu/dalle-3-xl", weight_name="dalle-3-xl-lora-v1.safetensors", adapter_name="dalle1")
-        pipe.set_adapters(["dalle2","dalle1"], adapter_weights=[
+        pipe.set_adapters(["dalle2","dalle1"], adapter_weights=[0.7, 0.3])
     elif style=="Origami":
         pipe.load_lora_weights("RalFinger/origami-style-sdxl-lora", weight_name="ral-orgmi-sdxl.safetensors", adapter_name="origami")
         pipe.set_adapters(["origami"], adapter_weights=[2])
@@ -74,15 +74,15 @@ def generate(
         pipe.load_lora_weights("artificialguybr/LogoRedmond-LogoLoraForSDXL", weight_name="LogoRedmond_LogoRedAF.safetensors", adapter_name="pixel")
         pipe.set_adapters(["lora", "pixel"], adapter_weights=[0.5, 1.2])
     else: 
-        pipe.load_lora_weights(
-
-
+        pipe.load_lora_weights()
+
     pipe.to("cuda")
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
 
     options = {
         "prompt":prompt,
+        "style":style,
         "negative_prompt":negative_prompt,
         "width":width,
         "height":height,
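For context on the adapter calls touched above: the load_lora_weights / set_adapters pair is the standard diffusers multi-LoRA pattern, and the 0.7/0.3 blend is the weighting this commit introduces for the "BEST" style. Below is a minimal, self-contained sketch of that pattern; the base SDXL checkpoint, dtype, and example prompt are assumptions for illustration and are not taken from app.py.

# Hedged sketch of the multi-LoRA blend used in the "BEST" branch of the diff.
# Assumptions (not from app.py): base checkpoint, fp16 dtype, example prompt.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed base model
    torch_dtype=torch.float16,
)

# Load two LoRA adapters under distinct names, as the commit does.
pipe.load_lora_weights(
    "ehristoforu/dalle-3-xl-v2",
    weight_name="dalle-3-xl-lora-v2.safetensors",
    adapter_name="dalle2",
)
pipe.load_lora_weights(
    "ehristoforu/dalle-3-xl",
    weight_name="dalle-3-xl-lora-v1.safetensors",
    adapter_name="dalle1",
)

# Blend the adapters; 0.7 / 0.3 are the weights added in this commit.
pipe.set_adapters(["dalle2", "dalle1"], adapter_weights=[0.7, 0.3])

pipe.to("cuda")
image = pipe(prompt="a paper crane, studio lighting").images[0]  # example prompt (assumed)

Note that set_adapters activates the named adapters and scales each one's contribution, which is why the two load_lora_weights calls must register distinct adapter_name values before they can be blended.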