Commit 5063f38 · Parent(s): 9c0918a
options/Banner_Model/Image2Image_2.py CHANGED
@@ -1,34 +1,27 @@
-
-
-
-
-
+import spaces
+import torch
+from controlnet_aux import LineartDetector
+from diffusers import ControlNetModel,UniPCMultistepScheduler,StableDiffusionControlNetPipeline
+from PIL import Image
 
-
-
+device= "cuda" if torch.cuda.is_available() else "cpu"
+print("Using device for I2I_2:", device)
 
-
-# def I2I_2(image, prompt,size,num_inference_steps,guidance_scale):
-#     processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
-
-#     checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
-#     controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
-#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
-#         "radames/stable-diffusion-v1-5-img2img", controlnet=controlnet, torch_dtype=torch.float16
-#     ).to(device)
-#     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-#     pipe.enable_model_cpu_offload()
-#     if not isinstance(image, Image.Image):
-#         image = Image.fromarray(image)
-#     image.resize((size,size))
-#     image=processor(image)
-#     generator = torch.Generator(device=device).manual_seed(0)
-#     image = pipe(prompt+"best quality, extremely detailed", num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",guidance_scale=guidance_scale).images[0]
-#     return image
-
-from gradio_client import Client
+@spaces.GPU(duration=100)
 def I2I_2(image, prompt,size,num_inference_steps,guidance_scale):
-
-
-
-
+    processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+
+    checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
+    controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
+    pipe = StableDiffusionControlNetPipeline.from_pretrained(
+        "radames/stable-diffusion-v1-5-img2img", controlnet=controlnet, torch_dtype=torch.float16
+    ).to(device)
+    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_model_cpu_offload()
+    if not isinstance(image, Image.Image):
+        image = Image.fromarray(image)
+    image.resize((size,size))
+    image=processor(image)
+    generator = torch.Generator(device=device).manual_seed(0)
+    image = pipe(prompt+"best quality, extremely detailed", num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",guidance_scale=guidance_scale).images[0]
+    return image
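For context, a minimal usage sketch of the updated function. The import path follows the file's location in this commit; the input/output filenames, prompt text, and parameter values are illustrative assumptions, not part of the commit.

# Hypothetical caller for I2I_2; filenames, prompt, and values are assumptions.
from PIL import Image

from options.Banner_Model.Image2Image_2 import I2I_2

# Any RGB image works; I2I_2 also converts numpy arrays via Image.fromarray.
source = Image.open("banner_input.png").convert("RGB")

result = I2I_2(
    source,
    prompt="a clean product banner, studio lighting, ",
    size=512,
    num_inference_steps=30,
    guidance_scale=7.5,
)
result.save("banner_output.png")

The trailing ", " in the example prompt is deliberate: I2I_2 concatenates "best quality, extremely detailed" directly onto the prompt string. Note also that the function rebuilds the LineartDetector, ControlNet, and pipeline on every call, which is why the @spaces.GPU(duration=100) decorator requests up to 100 seconds of ZeroGPU time per invocation to cover setup plus inference.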