Upload with huggingface_hub
- .gitattributes +4 -0
 - AbyssHellVer3-fp16.safetensors +3 -0
 - AbyssHellVer3-no-ema.safetensors +3 -0
 - README.md +20 -0
 - example/000.png +3 -0
 - example/001.png +3 -0
 - example/002.png +3 -0
 - example/003.png +3 -0
 - feature_extractor/preprocessor_config.json +28 -0
 - model_index.json +33 -0
 - scheduler/scheduler_config.json +20 -0
 - text_encoder/config.json +25 -0
 - text_encoder/pytorch_model.bin +3 -0
 - tokenizer/merges.txt +0 -0
 - tokenizer/special_tokens_map.json +24 -0
 - tokenizer/tokenizer_config.json +34 -0
 - tokenizer/vocab.json +0 -0
 - unet/config.json +44 -0
 - unet/diffusion_pytorch_model.bin +3 -0
 - vae/config.json +29 -0
 - vae/diffusion_pytorch_model.bin +3 -0
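The commit title says the files were pushed with the huggingface_hub client. Below is a minimal sketch of how such an upload is typically done; the local folder path and target repo id are placeholders (assumptions), not values recorded in this commit.

```python
# Sketch of an upload like this one, using huggingface_hub.
# folder_path and repo_id are placeholders, not values from the commit.
from huggingface_hub import HfApi

api = HfApi()  # expects a token from `huggingface-cli login` or HF_TOKEN
api.upload_folder(
    folder_path="./AbyssHellVer3",          # local diffusers folder (placeholder)
    repo_id="your-username/AbyssHellVer3",  # target model repo (placeholder)
    repo_type="model",
    commit_message="Upload with huggingface_hub",
)
```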
 
    	
.gitattributes CHANGED
@@ -32,3 +32,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+example/000.png filter=lfs diff=lfs merge=lfs -text
+example/001.png filter=lfs diff=lfs merge=lfs -text
+example/002.png filter=lfs diff=lfs merge=lfs -text
+example/003.png filter=lfs diff=lfs merge=lfs -text
    	
AbyssHellVer3-fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfa66b30691658c1675c3a354bc3e7826b51fe269db0aa76a804fc47b045a113
+size 2132625431
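The three lines above are a Git LFS pointer, not the checkpoint itself: the ~2.13 GB fp16 file lives in LFS storage and is identified by its SHA-256 OID and byte size. A small, purely illustrative sketch that parses such a pointer:

```python
# Parse a Git LFS pointer file (version / oid / size) into a dict.
# pointer_text mirrors the three lines shown above.
pointer_text = """version https://git-lfs.github.com/spec/v1
oid sha256:bfa66b30691658c1675c3a354bc3e7826b51fe269db0aa76a804fc47b045a113
size 2132625431
"""

def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "spec": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],
        "size_bytes": int(fields["size"]),
    }

print(parse_lfs_pointer(pointer_text))  # ~2.13 GB fp16 checkpoint
```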
    	
AbyssHellVer3-no-ema.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:011995961e18da2f33c9b25e720a34f7bbe1587eafc33d77050084d05a747b61
+size 4265096689
    	
README.md ADDED
@@ -0,0 +1,20 @@
+---
+license: creativeml-openrail-m
+library_name: diffusers
+pipeline_tag: text-to-image
+tags:
+- stable-diffusion
+- aiartchan
+---
+
+# AbyssHellVer3 (Abyss Hell Hero Variation)
+
+[Original post](https://arca.live/b/aiart/70498939)
+
+[huggingface](https://huggingface.co/KMAZ/TestSamples)
+
+# Download
+
+[original 7.7GB](https://huggingface.co/KMAZ/TestSamples/resolve/main/AbyssHellVer3.ckpt)
+
+An AbyssHellHero variation merged from AbyssOrangeMix2 + JK Style 0.27 + Helltaker 0.2 + HeroAcademia 0.2.
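The Download section links the original single-file checkpoint hosted in KMAZ/TestSamples. A minimal sketch of fetching it with huggingface_hub, with repo id and filename taken from that link:

```python
# Fetch the original checkpoint referenced in the README.
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="KMAZ/TestSamples",
    filename="AbyssHellVer3.ckpt",
)
print(ckpt_path)  # local cache path of the ~7.7GB original checkpoint
```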
    	
example/000.png ADDED (image stored via Git LFS)
example/001.png ADDED (image stored via Git LFS)
example/002.png ADDED (image stored via Git LFS)
example/003.png ADDED (image stored via Git LFS)
    	
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPFeatureExtractor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
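This is the standard CLIP ViT-L/14 preprocessing: 224x224 center crop, the usual CLIP mean/std, bicubic resampling (resample 3), and a 1/255 rescale. A hedged loading sketch with transformers; the repo id below is a placeholder, not a value from this commit:

```python
# Load the feature extractor defined by preprocessor_config.json.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
from transformers import CLIPFeatureExtractor

feature_extractor = CLIPFeatureExtractor.from_pretrained(
    "your-username/AbyssHellVer3", subfolder="feature_extractor"
)
print(feature_extractor.crop_size)   # {'height': 224, 'width': 224}
print(feature_extractor.image_mean)  # [0.48145466, 0.4578275, 0.40821073]
```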
    	
model_index.json ADDED
@@ -0,0 +1,33 @@
+{
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.12.1",
+  "feature_extractor": [
+    "transformers",
+    "CLIPFeatureExtractor"
+  ],
+  "requires_safety_checker": false,
+  "safety_checker": [
+    null,
+    null
+  ],
+  "scheduler": [
+    "diffusers",
+    "DPMSolverMultistepScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
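model_index.json is what diffusers reads to assemble the pipeline: a StableDiffusionPipeline with a DPM-Solver++ multistep scheduler and no safety checker (both entries are null). A minimal loading sketch; the repo id and the prompt are placeholders, since the commit page does not name the target repository:

```python
# Assemble the pipeline described by model_index.json.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "your-username/AbyssHellVer3",
    torch_dtype=torch.float16,
    safety_checker=None,  # matches "safety_checker": [null, null]
)
pipe = pipe.to("cuda")

image = pipe(
    "an example prompt",      # placeholder prompt
    num_inference_steps=25,
    guidance_scale=7.0,
).images[0]
image.save("sample.png")
```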
    	
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "_class_name": "DPMSolverMultistepScheduler",
+  "_diffusers_version": "0.12.1",
+  "algorithm_type": "dpmsolver++",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "dynamic_thresholding_ratio": 0.995,
+  "lower_order_final": true,
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": false,
+  "solver_order": 2,
+  "solver_type": "midpoint",
+  "steps_offset": 1,
+  "thresholding": false,
+  "trained_betas": null
+}
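The scheduler is DPM-Solver++ (multistep, order 2, midpoint solver) over the usual SD 1.x scaled-linear beta schedule. A small sketch of loading it on its own or rebuilding it from a loaded pipeline's config; the repo id is again a placeholder:

```python
# Two equivalent ways to get the scheduler described above.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# 1) Load it directly from the scheduler/ subfolder.
scheduler = DPMSolverMultistepScheduler.from_pretrained(
    "your-username/AbyssHellVer3", subfolder="scheduler"
)

# 2) Or rebuild it from an already-loaded pipeline's scheduler config.
pipe = StableDiffusionPipeline.from_pretrained("your-username/AbyssHellVer3")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
print(pipe.scheduler.config.algorithm_type)  # "dpmsolver++"
```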
    	
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "openai/clip-vit-large-patch14",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.26.1",
+  "vocab_size": 49408
+}
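The text encoder is the standard SD 1.x CLIP ViT-L/14 text model (12 layers, hidden size 768, 77-token context). A loading sketch with transformers; the repo id is a placeholder:

```python
# Load the text encoder defined by text_encoder/config.json.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained(
    "your-username/AbyssHellVer3", subfolder="text_encoder"
)
print(text_encoder.config.hidden_size)              # 768
print(text_encoder.config.max_position_embeddings)  # 77
```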
    	
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f572d6fb2f7c0727a0f8c55df47db8937b3d23465a3f2903c67778fe8257de4f
+size 492307041
    	
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See the raw file.
    	
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
    	
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "name_or_path": "openai/clip-vit-large-patch14",
+  "pad_token": "<|endoftext|>",
+  "special_tokens_map_file": "./special_tokens_map.json",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
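The tokenizer is the stock CLIP BPE tokenizer with a 77-token model_max_length and <|startoftext|>/<|endoftext|> special tokens. A usage sketch; the repo id and prompt are placeholders:

```python
# Load the CLIP tokenizer configured above and tokenize a prompt.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "your-username/AbyssHellVer3", subfolder="tokenizer"
)
enc = tokenizer(
    "an example prompt",                     # placeholder prompt
    padding="max_length",
    max_length=tokenizer.model_max_length,   # 77
    truncation=True,
    return_tensors="pt",
)
print(enc.input_ids.shape)                       # torch.Size([1, 77])
print(tokenizer.bos_token, tokenizer.eos_token)  # <|startoftext|> <|endoftext|>
```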
    	
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See the raw file.
    	
unet/config.json ADDED
@@ -0,0 +1,44 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.12.1",
+  "act_fn": "silu",
+  "attention_head_dim": 8,
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "cross_attention_dim": 768,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dual_cross_attention": false,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 64,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
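This is the standard SD 1.x UNet layout: 4 latent channels at sample_size 64 (512px images through the 8x VAE), cross-attention on 768-dim CLIP text embeddings. A shape-check sketch; the repo id is a placeholder:

```python
# Load the UNet described by unet/config.json and check its I/O shapes.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "your-username/AbyssHellVer3", subfolder="unet"
)
latents = torch.randn(1, unet.config.in_channels, 64, 64)   # 4 latent channels
timestep = torch.tensor([10])
text_emb = torch.randn(1, 77, unet.config.cross_attention_dim)  # 768-dim
noise_pred = unet(latents, timestep, encoder_hidden_states=text_emb).sample
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```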
    	
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a326597ccbfed692dadda96cdafc02178975aeaafcaf1a796f4fa47c209d12a5
+size 3438366373
    	
vae/config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.12.1",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
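The VAE is the usual SD 1.x AutoencoderKL: 3-channel 512px images in and out, 4 latent channels, 8x spatial downsampling. A round-trip shape sketch; the repo id is a placeholder:

```python
# Load the VAE described by vae/config.json and check the latent shape.
# "your-username/AbyssHellVer3" is a placeholder repo id (assumption).
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "your-username/AbyssHellVer3", subfolder="vae"
)
image = torch.randn(1, 3, 512, 512)            # sample_size 512, 3 channels
latents = vae.encode(image).latent_dist.sample()
print(latents.shape)                           # torch.Size([1, 4, 64, 64])
recon = vae.decode(latents).sample
print(recon.shape)                             # torch.Size([1, 3, 512, 512])
```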
    	
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3c9d55d6b0a6e9c0ab48da752936a8b8dc57ae0ed3b4375f970247ee14c18b2
+size 334711857