diff --git a/Dockerfile b/Dockerfile index dc78de639312f3b6da1997e9e9c3b3b279e1f1aa..8e5073bcd7eac850c65a5d520978331fe0e94c28 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,4 +16,4 @@ COPY --chown=user train.sh pytorch-image-models RUN chmod +x pytorch-image-models/train.sh COPY --chown=user . /app -CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"] +CMD ["python", "app.py"] diff --git a/app.py b/app.py index 92efcc20bbf4c2d18e2d99c056bfe09a3040a2a2..e414b53e1df9d688194abe54c913afa4e2e0ed72 100644 --- a/app.py +++ b/app.py @@ -1,6 +1,6 @@ import os -from fastapi import FastAPI -import subprocess +import gradio as gr + import wandb from huggingface_hub import HfApi @@ -9,13 +9,11 @@ API = HfApi(token=TOKEN) wandb_api_key = os.environ.get('wandb_api_key') wandb.login(key=wandb_api_key) -random_num = 80.0 +random_num = '80.0' subset = 'frac-1over64' experiment_name = f"ImageNetTraining{random_num}-{subset}" experiment_repo = f"datacomp/{experiment_name}" -app = FastAPI() -@app.get("/") def start_train(): os.system("echo '#### pwd'") os.system("pwd") @@ -33,11 +31,21 @@ def start_train(): # Handles CUDA OOM errors. os.system(f"export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True") os.system("echo 'Okay, trying training.'") - os.system(f"cd pytorch-image-models; ./train.sh 4 --dataset hfds/datacomp/imagenet-1k-random-{random_num}-{subset} --log-wandb --experiment ImageNetTraining{random_num}-{subset} --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4") + os.system(f"cd pytorch-image-models; ./train.sh 4 --dataset hfds/datacomp/imagenet-1k-random-{random_num}-{subset} --log-wandb --wandb-project {experiment_name} --experiment ImageNetTraining{random_num}-{subset} --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4") os.system("echo 'Done'.") os.system("ls") # Upload output to repository os.system("echo 'trying to upload...'") API.upload_folder(folder_path="/app", repo_id=f"{experiment_repo}", repo_type="dataset",) API.pause_space(experiment_repo) - return {"Completed": "!"} \ No newline at end of file + +def run(): + with gr.Blocks() as app: + gr.Markdown(f"Randomization: {random_num}") + gr.Markdown(f"Subset: {subset}") + start = gr.Button("Start") + start.click(start_train) + app.launch(server_name="0.0.0.0", server_port=7860) + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/pytorch-image-models/hfdocs/source/models.mdx b/pytorch-image-models/hfdocs/source/models.mdx index c993f887e20547e538f264c24710878ff357359d..97ff00b9ec75e57e2d63910b4384c69c646ae9c3 100644 --- a/pytorch-image-models/hfdocs/source/models.mdx +++ b/pytorch-image-models/hfdocs/source/models.mdx @@ -33,7 +33,7 @@ A more exciting view (with pretty pictures) of the models within `timm` can be f ## DLA * Implementation: [dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py) -* Paper: https://arxiv.org/abs/1707.06484 +* Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 * Code: https://github.com/ucbdrive/dla ## Dual-Path Networks @@ -78,14 +78,14 @@ A more exciting view (with pretty pictures) of the models within `timm` can be f ## NASNet-A * Implementation: [nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py) -* Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 +* Paper: 
`Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## PNasNet-5 * Implementation: [pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py) -* Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 +* Paper: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet diff --git a/pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx b/pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx index fd7fce479817b78a3db9ce59ab4a050ad3e3e4af..180bf16a7d5b5c6a15583d2c749956116b0d34f2 100644 --- a/pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx +++ b/pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/advprop.mdx b/pytorch-image-models/hfdocs/source/models/advprop.mdx index 0743a1e413da2a7561db7625ef3da2137dce9cb5..9ac7c754b3cf184f685fd5bf867a7b5c7d2fd5bb 100644 --- a/pytorch-image-models/hfdocs/source/models/advprop.mdx +++ b/pytorch-image-models/hfdocs/source/models/advprop.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/big-transfer.mdx b/pytorch-image-models/hfdocs/source/models/big-transfer.mdx index 05255c356a7d2c86a5209c38867e37ac719dc48d..50d6f96efe37666944185c844289fc3900098830 100644 --- a/pytorch-image-models/hfdocs/source/models/big-transfer.mdx +++ b/pytorch-image-models/hfdocs/source/models/big-transfer.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/csp-darknet.mdx b/pytorch-image-models/hfdocs/source/models/csp-darknet.mdx index 7accdbaefb4c2fbc39e2a26a79aa88037a370b51..b924be7c280e400b9454c1cac7c08ca8080ef46f 100644 --- a/pytorch-image-models/hfdocs/source/models/csp-darknet.mdx +++ b/pytorch-image-models/hfdocs/source/models/csp-darknet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/csp-resnet.mdx b/pytorch-image-models/hfdocs/source/models/csp-resnet.mdx index 66971057f307a714cf28a8d56e4a8d2c1e4b8520..2c74275d0cf29fc43d1c5469a21df410b267149f 100644 --- a/pytorch-image-models/hfdocs/source/models/csp-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/csp-resnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/csp-resnext.mdx b/pytorch-image-models/hfdocs/source/models/csp-resnext.mdx index d9eb8383760cbb6440231abe3f43259a1d2d009b..57bb090ef87701a4a5a7a42952a2377987c2e3e4 100644 --- a/pytorch-image-models/hfdocs/source/models/csp-resnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/csp-resnext.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/densenet.mdx b/pytorch-image-models/hfdocs/source/models/densenet.mdx index cec9ba171c32560a6da4ddf94e12cb4be96d6468..58393c2106deba94201bdec59791683fe794aeed 100644 --- a/pytorch-image-models/hfdocs/source/models/densenet.mdx +++ b/pytorch-image-models/hfdocs/source/models/densenet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/dla.mdx b/pytorch-image-models/hfdocs/source/models/dla.mdx index ab38d62677563ff225f204a95598eba3ac9c2f37..2cfcd1055c79652585fad7b0d45fdd4a7f554570 100644 --- a/pytorch-image-models/hfdocs/source/models/dla.mdx +++ b/pytorch-image-models/hfdocs/source/models/dla.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/dpn.mdx b/pytorch-image-models/hfdocs/source/models/dpn.mdx index 26d8a4a7b7db52024f4e63d13350f250044984c1..0c28c66f52813d1c7e2d420f9583019e81c6277a 100644 --- a/pytorch-image-models/hfdocs/source/models/dpn.mdx +++ b/pytorch-image-models/hfdocs/source/models/dpn.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/ecaresnet.mdx b/pytorch-image-models/hfdocs/source/models/ecaresnet.mdx index c5c1619ea31d9100274c1dbd0fd2e1022a4c4524..d4aa372e0eb7730dd75b5e9215a39d617a344c3e 100644 --- a/pytorch-image-models/hfdocs/source/models/ecaresnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/ecaresnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx b/pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx index 444f6d521d4560e012c514bba65623e15d1634b9..d6bc760ee244767596691bf3c74efeeb1d7212a9 100644 --- a/pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx +++ b/pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx @@ -1,6 +1,6 @@ # EfficientNet (Knapsack Pruned) -**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way. +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. @@ -79,7 +79,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/efficientnet.mdx b/pytorch-image-models/hfdocs/source/models/efficientnet.mdx index 4bcaba622c6eb9105860bfe8875b400f95735eeb..bb86ab83039ab193aa4cc2f643ab91dfa98ad58a 100644 --- a/pytorch-image-models/hfdocs/source/models/efficientnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/efficientnet.mdx @@ -1,6 +1,6 @@ # EfficientNet -**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way. +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx b/pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx index 4781aa1f1884525a316cff689b3db18b9f38d85d..19373ffe71ee4933dce34987c996459f6ec27107 100644 --- a/pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx +++ b/pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx b/pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx index d92a603440502ac7808cd132696ab7ad5d15d428..51a41144ff01453b759235d93bc95c562b438bb8 100644 --- a/pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/ese-vovnet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/fbnet.mdx b/pytorch-image-models/hfdocs/source/models/fbnet.mdx index cdb3ba610c358c31418baeb0dac4482925b879c5..b96eaed7c0f0d5814da2d589a42dfbb3d5705a9f 100644 --- a/pytorch-image-models/hfdocs/source/models/fbnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/fbnet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/gloun-inception-v3.mdx b/pytorch-image-models/hfdocs/source/models/gloun-inception-v3.mdx index edf3d87075ff16e072eacf55aaaff48d5deba47e..212ba7b4f1caea3479300eb0a5e9af9bce100006 100644 --- a/pytorch-image-models/hfdocs/source/models/gloun-inception-v3.mdx +++ b/pytorch-image-models/hfdocs/source/models/gloun-inception-v3.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx b/pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx index 8cd9705690d675ad51a63bebeda34a41bf278cb8..85e5ddbecf1a2bb18809fddba6ab03ed3120b6af 100644 --- a/pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx b/pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx index dd4cd03364b8c4a78ccc7f4b59352002283b7277..89f9d24bf9961f5a47835cd0ed7084b38873a667 100644 --- a/pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/gloun-senet.mdx b/pytorch-image-models/hfdocs/source/models/gloun-senet.mdx index ee28d7c81f656518a85b13e27efb29fc7864af2b..f8df6d87f941a5d1b8517fb18e790d0e8bd9d33e 100644 --- a/pytorch-image-models/hfdocs/source/models/gloun-senet.mdx +++ b/pytorch-image-models/hfdocs/source/models/gloun-senet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx b/pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx index bb43215549cd20ce3391ad912b122a4830e81b39..aedf70919ea4aadf173d77d5357b66f4b6343044 100644 --- a/pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/gloun-seresnext.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/gloun-xception.mdx b/pytorch-image-models/hfdocs/source/models/gloun-xception.mdx index 0dee376e7cd57d0d9f2a5a8d17e9ef0f1176948d..e79b8d3109de5ccbb991158de7c129bee0c7b672 100644 --- a/pytorch-image-models/hfdocs/source/models/gloun-xception.mdx +++ b/pytorch-image-models/hfdocs/source/models/gloun-xception.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/hrnet.mdx b/pytorch-image-models/hfdocs/source/models/hrnet.mdx index 3437b96b79bb84b4e27b3857778950d3000af0e4..b3f453e4226edb21995f1665b8cae5da8f173323 100644 --- a/pytorch-image-models/hfdocs/source/models/hrnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/hrnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/ig-resnext.mdx b/pytorch-image-models/hfdocs/source/models/ig-resnext.mdx index d5a035ba63f29bfa1ba729f418df4c5cd035e85e..8fd5dbe808e778d6b259980eee44961ab3ed0e5b 100644 --- a/pytorch-image-models/hfdocs/source/models/ig-resnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/ig-resnext.mdx @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx b/pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx index c4eb15869d8526ba67c60c0f707c1b81cd7f143f..c746cead19d61d3aa5ca4bfe549035ed8542ad18 100644 --- a/pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx +++ b/pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/inception-v3.mdx b/pytorch-image-models/hfdocs/source/models/inception-v3.mdx index cd21e5de0f319c70891e1e22ce1180868013113b..c6ff6931e2b0721ca849c9297092d15522a2f529 100644 --- a/pytorch-image-models/hfdocs/source/models/inception-v3.mdx +++ b/pytorch-image-models/hfdocs/source/models/inception-v3.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/inception-v4.mdx b/pytorch-image-models/hfdocs/source/models/inception-v4.mdx index f9b4adf05157b2c87b6630fe932a89338e1ee41a..4072fbec15ee0b7cb1574ab49d95df08ca577075 100644 --- a/pytorch-image-models/hfdocs/source/models/inception-v4.mdx +++ b/pytorch-image-models/hfdocs/source/models/inception-v4.mdx @@ -72,7 +72,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx b/pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx index 68ed5a30b9bf3ce8255b6e0d2a47d9a83dd731f3..d96c75d689d4243b42bed1a55fb08bd891361373 100644 --- a/pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/legacy-se-resnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx b/pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx index 8ff527f3dabb4f3a28b9cd87de02475ab989ad60..eef0c977db62297b442e6c08918aec0a5a02bdca 100644 --- a/pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/legacy-senet.mdx b/pytorch-image-models/hfdocs/source/models/legacy-senet.mdx index 9768d868e07710b55bbdc174cdb766458c6985c9..750621ff091ca8d638ff25a3c7074d19a369659b 100644 --- a/pytorch-image-models/hfdocs/source/models/legacy-senet.mdx +++ b/pytorch-image-models/hfdocs/source/models/legacy-senet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/mixnet.mdx b/pytorch-image-models/hfdocs/source/models/mixnet.mdx index 2e058027eaabd2095c80ac6ad2a430d2072bf36e..2ef8885e6c61106fb14e21f864c4daa6cb83eed6 100644 --- a/pytorch-image-models/hfdocs/source/models/mixnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/mixnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/mnasnet.mdx b/pytorch-image-models/hfdocs/source/models/mnasnet.mdx index e49c06cc73da9d14a9d3b34896aff6a011c3b14d..23f217206a13ffe39fc43a51acd3e4f2c5b0ad09 100644 --- a/pytorch-image-models/hfdocs/source/models/mnasnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/mnasnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx b/pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx index 507ec99dd22f55998f5002c45ff929efc414781f..2ea949e932195fd55b5c753d736659815b75cefb 100644 --- a/pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx +++ b/pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx b/pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx index e1ff317ba2500f30ac46b6e9b265e9f1ace4e28e..35795ad0c94c0372e54e259f35bae30be1282eed 100644 --- a/pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx +++ b/pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/nasnet.mdx b/pytorch-image-models/hfdocs/source/models/nasnet.mdx index 2cde9d2d61cca8deb82a093defa9c8dd02c43e7f..2332131a5703308df9b872cd442bed0d394a7972 100644 --- a/pytorch-image-models/hfdocs/source/models/nasnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/nasnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/noisy-student.mdx b/pytorch-image-models/hfdocs/source/models/noisy-student.mdx index 2d12e8c870cdcd434a50ba4999251a54d9105f60..480d48dd206c91ab82628e6c667eaab2bc69cd82 100644 --- a/pytorch-image-models/hfdocs/source/models/noisy-student.mdx +++ b/pytorch-image-models/hfdocs/source/models/noisy-student.mdx @@ -82,7 +82,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/pnasnet.mdx b/pytorch-image-models/hfdocs/source/models/pnasnet.mdx index fff5851cdcb5e64b403dcc7ab463616902c52b35..58141a641fedd77b72421ac3b3ddc6431c787490 100644 --- a/pytorch-image-models/hfdocs/source/models/pnasnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/pnasnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/regnetx.mdx b/pytorch-image-models/hfdocs/source/models/regnetx.mdx index 0a9f8e4b2c1724f2702ff77c9784141597f3ccb9..1e9548bd9ea977377379a0b927d8ac79b28b1c93 100644 --- a/pytorch-image-models/hfdocs/source/models/regnetx.mdx +++ b/pytorch-image-models/hfdocs/source/models/regnetx.mdx @@ -1,10 +1,10 @@ # RegNetX -**RegNetX** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\), and generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): +**RegNetX** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w_{0} > 0 \\), and slope \\( w_{a} > 0 \\), and generates a different block width \\( u_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): -\\( \\) u\_{j} = w\_{0} + w\_{a}\cdot{j} \\( \\) +\\( u_{j} = w_{0} + w_{a}\cdot{j} \\) -For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier). 
+For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w_{m} \geq 2 \\) (the width multiplier). ## How do I use this model on an image? @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/regnety.mdx b/pytorch-image-models/hfdocs/source/models/regnety.mdx index be1a5c1c7507e4d99cf6ef00920751b1c304117c..04d869280b4e9ab52a00b84dea26920264c1dcfe 100644 --- a/pytorch-image-models/hfdocs/source/models/regnety.mdx +++ b/pytorch-image-models/hfdocs/source/models/regnety.mdx @@ -1,10 +1,10 @@ # RegNetY -**RegNetY** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\), and generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): +**RegNetY** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w_{0} > 0 \\), and slope \\( w_{a} > 0 \\), and generates a different block width \\( u_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): -\\( \\) u\_{j} = w\_{0} + w\_{a}\cdot{j} \\( \\) +\\( u_{j} = w_{0} + w_{a}\cdot{j} \\) -For **RegNetX** authors have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier). +For **RegNetX** authors have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w_{m} \geq 2 \\) (the width multiplier). For **RegNetY** authors make one change, which is to include [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). @@ -79,7 +79,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/res2net.mdx b/pytorch-image-models/hfdocs/source/models/res2net.mdx index 751cf35d42fc6c7a67d9b3ca9e126c26f7779f27..bd5598d566e933956dd9246aa919a736860c2607 100644 --- a/pytorch-image-models/hfdocs/source/models/res2net.mdx +++ b/pytorch-image-models/hfdocs/source/models/res2net.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/res2next.mdx b/pytorch-image-models/hfdocs/source/models/res2next.mdx index 8c883dff7a67eb068a79e942984ffa06c698d62a..11ef5ece57c2dcf78bdc2a3888d0c54961000f28 100644 --- a/pytorch-image-models/hfdocs/source/models/res2next.mdx +++ b/pytorch-image-models/hfdocs/source/models/res2next.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/resnest.mdx b/pytorch-image-models/hfdocs/source/models/resnest.mdx index 59ac6c6156125debd8734e3a5b4418cddd8bed32..b6f73dcf6835e72cc70685a6ffb87d6be0350be9 100644 --- a/pytorch-image-models/hfdocs/source/models/resnest.mdx +++ b/pytorch-image-models/hfdocs/source/models/resnest.mdx @@ -1,6 +1,6 @@ # ResNeSt -A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: \\( V = \text{Concat} \\){\\( V^{1},V^{2},\cdots{V}^{K} \\)}. As in standard residual blocks, the final output \\( Y \\) of otheur Split-Attention block is produced using a shortcut connection: \\( Y=V+X \\), if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation \\( \mathcal{T} \\) is applied to the shortcut connection to align the output shapes: \\( Y=V+\mathcal{T}(X) \\). For example, \\( \mathcal{T} \\) can be strided convolution or combined convolution-with-pooling. +A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: \\( V = \text{Concat} \{ V^{1},V^{2},\cdots,{V}^{K} \} \\). As in standard residual blocks, the final output \\( Y \\) of otheur Split-Attention block is produced using a shortcut connection: \\( Y=V+X \\), if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation \\( \mathcal{T} \\) is applied to the shortcut connection to align the output shapes: \\( Y=V+\mathcal{T}(X) \\). For example, \\( \mathcal{T} \\) can be strided convolution or combined convolution-with-pooling. ## How do I use this model on an image? @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/resnet-d.mdx b/pytorch-image-models/hfdocs/source/models/resnet-d.mdx index 1f6b47b02a8dc65a704d8ffb69bcced37a43a3b9..8eb7f0457c646c4b4e8e505f23fdc34214e0df18 100644 --- a/pytorch-image-models/hfdocs/source/models/resnet-d.mdx +++ b/pytorch-image-models/hfdocs/source/models/resnet-d.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/resnet.mdx b/pytorch-image-models/hfdocs/source/models/resnet.mdx index 3f3641f785f5236ba095095c18632b6efc4d4db8..5aeb50c5a4cbeeb9ae45dd156e13ad88ca10e920 100644 --- a/pytorch-image-models/hfdocs/source/models/resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/resnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/resnext.mdx b/pytorch-image-models/hfdocs/source/models/resnext.mdx index 7c5eb166e18d07188e00e276a3e2b4ea4023466f..d89191ecc6319b439ac27abbc0f4e14fd0f5400f 100644 --- a/pytorch-image-models/hfdocs/source/models/resnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/resnext.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/rexnet.mdx b/pytorch-image-models/hfdocs/source/models/rexnet.mdx index 09fb2144b54dad93bea6965e55b82dc70fa95e54..f805729a74e7318dea84bfa3fab8f5ea3945624c 100644 --- a/pytorch-image-models/hfdocs/source/models/rexnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/rexnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/se-resnet.mdx b/pytorch-image-models/hfdocs/source/models/se-resnet.mdx index ba884bde95599de81b2235a5dc387de659b794eb..db1133cde5d69d66908f4dbecad9f2b726778e10 100644 --- a/pytorch-image-models/hfdocs/source/models/se-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/se-resnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/selecsls.mdx b/pytorch-image-models/hfdocs/source/models/selecsls.mdx index 12813ba864d3a8ae8056c26ec8ff83c49d8206c5..aa43c8219fe5514a5b697bd79d99de44eccff1af 100644 --- a/pytorch-image-models/hfdocs/source/models/selecsls.mdx +++ b/pytorch-image-models/hfdocs/source/models/selecsls.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/seresnext.mdx b/pytorch-image-models/hfdocs/source/models/seresnext.mdx index 1f4534966753165318dcb71bc09254e3a0ed1774..997d39629a55586be79fecde37ac9ca3468b1f9b 100644 --- a/pytorch-image-models/hfdocs/source/models/seresnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/seresnext.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/skresnet.mdx b/pytorch-image-models/hfdocs/source/models/skresnet.mdx index 643a6cbcd86f466939f1cf4881bd4b4851695697..1229db5bdb16323319c256b70d627799eeb918b5 100644 --- a/pytorch-image-models/hfdocs/source/models/skresnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/skresnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/skresnext.mdx b/pytorch-image-models/hfdocs/source/models/skresnext.mdx index ae499b34dad41baa4cdf69fd541279a0c6a82eb6..036f895a598906c76524c89de06ac446925bacdb 100644 --- a/pytorch-image-models/hfdocs/source/models/skresnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/skresnext.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/spnasnet.mdx b/pytorch-image-models/hfdocs/source/models/spnasnet.mdx index a589b0fcdd94ee3b20bc5786f54f9812bec462a5..d7adf93b3b695d01d0e14b8a6cda3b1ad8b09039 100644 --- a/pytorch-image-models/hfdocs/source/models/spnasnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/spnasnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx b/pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx index 2e1d0a7a3d0f78ccc1b04a31b56317984714f75a..e781c576bb7ee7234633a0ccba4b5f0f4d390a7a 100644 --- a/pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/hfdocs/source/models/swsl-resnet.mdx b/pytorch-image-models/hfdocs/source/models/swsl-resnet.mdx index 2def3c868e06025ebde566b206fd42243a2ec7b1..1e20024f200af2d4ebca12f83a7d9d22fd2f1905 100644 --- a/pytorch-image-models/hfdocs/source/models/swsl-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/swsl-resnet.mdx @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx b/pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx index 66297082f0a1b4b8fc46f8cf33fb46a8ecda94a7..f551b875879b90c8d330b9a6c55103bbe4030526 100644 --- a/pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx +++ b/pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx @@ -77,7 +77,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tf-efficientnet-condconv.mdx b/pytorch-image-models/hfdocs/source/models/tf-efficientnet-condconv.mdx index eb3bcc9b539d8f06efab90d280fb5a0a3d1d9f25..f41db0608b713e01660d801494d3afd88d7843ed 100644 --- a/pytorch-image-models/hfdocs/source/models/tf-efficientnet-condconv.mdx +++ b/pytorch-image-models/hfdocs/source/models/tf-efficientnet-condconv.mdx @@ -1,10 +1,10 @@ # (Tensorflow) EfficientNet CondConv -**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way. +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. 
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. -The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to squeeze-and-excitation blocks. +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). This collection of models amends EfficientNet by adding [CondConv](https://paperswithcode.com/method/condconv) convolutions. @@ -81,7 +81,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx b/pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx index f72eaf0c383c151989362bc04853eb91575cd715..75e093ac675552eb02ae4540f1338ba8c201dab6 100644 --- a/pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx +++ b/pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx @@ -1,6 +1,6 @@ # (Tensorflow) EfficientNet Lite -**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way. +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. 
@@ -81,7 +81,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx b/pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx index 16be0a71b69be62bf8a18273feee14c32d30931f..890ccd2c6feba7373c4d10657e9c134ac1d219cb 100644 --- a/pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx @@ -1,6 +1,6 @@ # (Tensorflow) EfficientNet -**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scales network width, depth, and resolution in a principled way. +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. @@ -79,7 +79,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tf-inception-v3.mdx b/pytorch-image-models/hfdocs/source/models/tf-inception-v3.mdx index a5f402a904b2fc8ef01d3915cf87ed8b14911918..ddb871f6f72e20f20b199339c0a2aefd99619567 100644 --- a/pytorch-image-models/hfdocs/source/models/tf-inception-v3.mdx +++ b/pytorch-image-models/hfdocs/source/models/tf-inception-v3.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? 
-You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx b/pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx index d1ddda09fbab9dea98bc946ec358522691dc5e4d..68def0aabc6dc5a59c2dcb2c0801655da6be83f5 100644 --- a/pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx b/pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx index 3e8c6cba3022102122b4c6788fb1f66e56d660c4..9460f536504b39e150b2bab59947777530ff1376 100644 --- a/pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx +++ b/pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/tresnet.mdx b/pytorch-image-models/hfdocs/source/models/tresnet.mdx index f2d5729d3a9e629d7dea5f0e400d28b4844703bd..8ff291057ccdedecef988507638add92327aa078 100644 --- a/pytorch-image-models/hfdocs/source/models/tresnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/tresnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/wide-resnet.mdx b/pytorch-image-models/hfdocs/source/models/wide-resnet.mdx index 47969325a46ed7c0df233613f7344887f01615a6..e8837607bbfb601b64c00796603a64224f40b3c3 100644 --- a/pytorch-image-models/hfdocs/source/models/wide-resnet.mdx +++ b/pytorch-image-models/hfdocs/source/models/wide-resnet.mdx @@ -73,7 +73,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation diff --git a/pytorch-image-models/hfdocs/source/models/xception.mdx b/pytorch-image-models/hfdocs/source/models/xception.mdx index f67e482c20d0a7f43a8205415bb77a2a071ac372..79f97f493724c3837727d5d76907b6a12a2ef98c 100644 --- a/pytorch-image-models/hfdocs/source/models/xception.mdx +++ b/pytorch-image-models/hfdocs/source/models/xception.mdx @@ -75,7 +75,7 @@ script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) ## How do I train this model? -You can follow the [timm recipe scripts](../scripts) for training a new model afresh. +You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation diff --git a/pytorch-image-models/inference.py b/pytorch-image-models/inference.py index 60581978b7de892e5738e9fe89f143555349aba2..2272f4e6184b424b168120965dbacc9e5278b312 100644 --- a/pytorch-image-models/inference.py +++ b/pytorch-image-models/inference.py @@ -12,6 +12,7 @@ import os import time from contextlib import suppress from functools import partial +from sys import maxsize import numpy as np import pandas as pd @@ -104,6 +105,8 @@ parser.add_argument('--amp', action='store_true', default=False, help='use Native AMP for mixed precision training') parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16)') +parser.add_argument('--model-dtype', default=None, type=str, + help='Model dtype override (non-AMP) (default: float32)') parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) @@ -146,6 +149,8 @@ parser.add_argument('--include-index', action='store_true', default=False, help='include the class index in results') parser.add_argument('--exclude-output', action='store_true', default=False, help='exclude logits/probs from results, just indices. topk must be set !=0.') +parser.add_argument('--no-console-results', action='store_true', default=False, + help='disable printing the inference results to the console') def main(): @@ -160,9 +165,15 @@ def main(): device = torch.device(args.device) + model_dtype = None + if args.model_dtype: + assert args.model_dtype in ('float32', 'float16', 'bfloat16') + model_dtype = getattr(torch, args.model_dtype) + # resolve AMP arguments based on PyTorch / Apex availability amp_autocast = suppress if args.amp: + assert model_dtype is None or model_dtype == torch.float32, 'float32 model dtype must be used with AMP' assert args.amp_dtype in ('float16', 'bfloat16') amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16 amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype) @@ -200,7 +211,7 @@ def main(): if args.test_pool: model, test_time_pool = apply_test_time_pool(model, data_config) - model = model.to(device) + model = model.to(device=device, dtype=model_dtype) model.eval() if args.channels_last: model = model.to(memory_format=torch.channels_last) @@ -236,6 +247,7 @@ def main(): use_prefetcher=True, num_workers=workers, device=device, + img_dtype=model_dtype or torch.float32, **data_config, ) @@ -279,7 +291,7 @@ def main(): np_labels = to_label(np_indices) all_labels.append(np_labels) - all_outputs.append(output.cpu().numpy()) + all_outputs.append(output.float().cpu().numpy()) # measure elapsed time batch_time.update(time.time() - end) @@ -338,11 +350,13 @@ def main(): for fmt in args.results_format: save_results(df, results_filename, fmt) - print(f'--result') - print(df.set_index(args.filename_col).to_json(orient='index', indent=4)) + if not args.no_console_results: + print(f'--result') + print(df.set_index(args.filename_col).to_json(orient='index', indent=4)) def save_results(df, results_filename, results_format='csv', filename_col='filename'): + np.set_printoptions(threshold=maxsize) results_filename += _FMT_EXT[results_format] if results_format == 'parquet': df.set_index(filename_col).to_parquet(results_filename) diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/args.yaml b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/args.yaml index 
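For readers skimming the inference.py hunks above: the change adds a `--model-dtype` override alongside AMP. A minimal standalone sketch of that pattern follows, with assumed names (`resolve_model_dtype`, `prepare_model`, `run_batch`) that are not the patch's actual API: map the CLI string to a torch dtype, refuse to combine a reduced-precision model with AMP, cast the model once, and upcast outputs to float32 before converting to NumPy.

```python
# Sketch of the model-dtype override pattern (assumed helper names).
from typing import Optional

import torch


def resolve_model_dtype(name: Optional[str]) -> Optional[torch.dtype]:
    """Map a CLI string like 'bfloat16' to a torch dtype, or None for default."""
    if name is None:
        return None
    allowed = {
        'float32': torch.float32,
        'float16': torch.float16,
        'bfloat16': torch.bfloat16,
    }
    assert name in allowed, f'unsupported model dtype: {name}'
    return allowed[name]


def prepare_model(model: torch.nn.Module, device, dtype_name: Optional[str], use_amp: bool):
    model_dtype = resolve_model_dtype(dtype_name)
    if use_amp:
        # AMP autocast expects float32 weights underneath.
        assert model_dtype in (None, torch.float32), 'float32 model dtype must be used with AMP'
    return model.to(device=device, dtype=model_dtype).eval()


@torch.no_grad()
def run_batch(model, batch):
    out = model(batch)
    # Upcast half/bfloat16 logits before handing them to NumPy.
    return out.float().cpu().numpy()
```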
d4e3e87d0922229e321b2502f0822fbc395f72bd..a98957cebe901666ba825703816bbf8da9f7e5da 100644 --- a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/args.yaml +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/args.yaml @@ -142,7 +142,7 @@ val_num_samples: null val_split: validation validation_batch_size: null vflip: 0.0 -wandb_project: null +wandb_project: ImageNetTraining80.0-frac-1over64 wandb_resume_id: '' wandb_tags: [] warmup_epochs: 5 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..d2a8a9e029dfc4c653eb8c644e44b690fc4c842b --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ce9d633c40a741ada5078cf540efe2835cb39c9eee84c9e79d048560d1829b +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..ecd665b3c32b23494ce1045bd1f248a3e85582e3 --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf9fbd43e27a2dab0ba0d3adda18f54b837335877ec6c220beca06789dd399bb +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..d11afdbd76f934eefdb1055a2ac904ebceeab483 --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b52c1b27bc2de8a047098eb89a2e42116182c639d2789ad03faa7e7d277f90d7 +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..eb5bd40cdf63ef2154984d99de63770b1c893d07 --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d6d60b4abda4a4bc413c99d16d189e5b8d6d822eeeef8f04ecb69e4da42498a +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..ab5189421023d26aae9a55144da92a4e1e23840c --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b31117a394ba95304f4f0caec4609525e24d9bb42793e8faf0fcf5642d2dd2a +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-141.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-141.pth.tar new file mode 
100644 index 0000000000000000000000000000000000000000..c4b47ef8c1365d5555c16bc63b4e8f57f652e9d2 --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-141.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fce1089c77656faaa371874c65d2fee6d00587c8a53d4344181fe525319ee5e +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-142.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-142.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..d6f5183b9cd3753a5eed8cf76e7889d7d670f9aa --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-142.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:291ed251e1c252f9b3df07cf37f95fadcab561f68729d474467554188c349323 +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-147.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-147.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..4184a8e81960f054ae0bb3de77a21accd58010cc --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-147.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0961f3981c65663cc6e4ea8de73b8930f060ba2f9cde74deb6fbb15bdeca7f8 +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar index eb7716fdf2047a2a7ddea39d11308b6f0170c40b..6e864636cad7532d61bea944a5b976e4fd2598ca 100644 --- a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f0821db5d8f2c42c25c1d48c5d0e99506cb337f0feecd391da00a549c509ebc -size 175905058 +oid sha256:d71d5acb4e51578ba23359dd1fe390ea3569f117955a97e5069814d6f9469d23 +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..2ce808ef7d09fa654c9cd4a748d539e23be18fc5 --- /dev/null +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c3e8752613b50ca0cafa4398b3ae9bb0c3b172a1da01252d3fc8085bc8769c9 +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/last.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/last.pth.tar index f35644bb6e4e3ed1e8c2a05f1349c289bfb10f9a..e1ab3104b9b225271e872c451202ea3f1f326278 100644 --- a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/last.pth.tar +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/last.pth.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2054782e80bd02154068d1de8cff456c3e6701dd838ac0905661ab7fdb8dac93 -size 175905058 +oid sha256:bf65d6117398c4a402a099dd0505b4410ff39b5085b9a0608ca2c78bf631d649 +size 175905122 diff --git 
a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/model_best.pth.tar b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/model_best.pth.tar index 30959a0bae4b44ab5019449ea7a2301e324705d0..d2a8a9e029dfc4c653eb8c644e44b690fc4c842b 100644 --- a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/model_best.pth.tar +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/model_best.pth.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5136f18f26b9ab453384f5daaf7b33738e2b3862907dc64f1a9d3d1d0e6811f5 -size 175905058 +oid sha256:b9ce9d633c40a741ada5078cf540efe2835cb39c9eee84c9e79d048560d1829b +size 175905122 diff --git a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/summary.csv b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/summary.csv index c1bbf0588f9cbb1940f4310e24e8a1881207298a..9c58e100553abeea97d93c29d61a8c861f1ef4b5 100644 --- a/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/summary.csv +++ b/pytorch-image-models/output/train/ImageNetTraining80.0-frac-1over64/summary.csv @@ -1,126 +1,151 @@ epoch,train_loss,eval_loss,eval_top1,eval_top5,lr -0,6.9424285888671875,6.920432336730957,0.104,0.5859999998664855,1e-05 -epoch,train_loss,eval_loss,eval_top1,eval_top5,lr -0,6.942429542541504,6.920431951293946,0.104,0.5859999998664855,1e-05 -1,6.924147129058838,6.939195544281006,0.12999999986648558,0.5359999987220764,0.080008 -1,6.924983024597168,6.930478667297363,0.1419999999332428,0.5459999997329712,0.080008 -2,6.913643836975098,6.965729615783691,0.1639999999666214,0.6439999998664856,0.160006 -2,6.91254997253418,6.966077185821534,0.1639999999332428,0.6039999998664856,0.160006 -3,6.913957118988037,6.917989406738282,0.2239999999332428,0.9480000011825561,0.240004 -3,6.911498546600342,6.925382147521972,0.2560000001525879,0.9519999997329712,0.240004 -4,6.902523994445801,6.9011966004943845,0.2780000001525879,1.1620000013351441,0.320002 -4,6.9004034996032715,6.904172467346191,0.2759999999332428,1.0639999993133544,0.320002 -5,6.893914699554443,6.891897925415039,0.3840000000190735,1.3379999993133544,0.39890437907365467 -5,6.892399311065674,6.8884028283691405,0.3719999997329712,1.3239999994659424,0.39890437907365467 -6,6.873651504516602,6.879224338226319,0.3159999999332428,1.360000001335144,0.3984229402628956 -6,6.873078346252441,6.878396666870117,0.3259999998664856,1.3779999994659424,0.3984229402628956 -7,6.854273796081543,6.868862268981934,0.40399999900817873,1.4820000036621093,0.3978544665925977 -7,6.85452938079834,6.867062584075928,0.45199999973297117,1.4660000007629395,0.3978544665925977 -8,6.8329596519470215,6.862469421386718,0.5179999995803833,1.8139999996185303,0.39719920741410103 -8,6.835845947265625,6.865656077270508,0.473999999294281,1.7079999996185302,0.39719920741410103 -9,6.818224906921387,6.863049849853516,0.5200000000190735,1.6660000007629394,0.3964574501457378 -9,6.820562839508057,6.862609147338867,0.4459999990081787,1.5079999996185303,0.3964574501457378 -10,6.805999755859375,6.8592342980957035,0.5220000003051758,1.6960000036621095,0.39562952014676117 -10,6.808788299560547,6.864919125671387,0.4979999990081787,1.7440000022125244,0.39562952014676117 -11,6.785886764526367,6.866093442382812,0.4760000003051758,1.6520000007629394,0.3947157805746321 -11,6.7904815673828125,6.863632247314453,0.441999999294281,1.5840000020599365,0.3947157805746321 -12,6.76955509185791,6.858748644561768,0.5839999997329712,1.8780000009155273,0.3937166322257262 
-12,6.7753424644470215,6.8594399725341795,0.5180000004577636,1.8840000009155273,0.3937166322257262 -13,6.759860038757324,6.860188617858887,0.5840000003051757,1.8620000036621094,0.39263251335953164 -13,6.763460159301758,6.862438941650391,0.5679999995803833,1.8519999994659424,0.39263251335953164 -14,6.742807865142822,6.867536324157715,0.5759999997329712,1.8680000010681153,0.39146389950641347 -14,6.747715473175049,6.8670238735961915,0.5359999997329712,1.7759999996185303,0.39146389950641347 -15,6.727993488311768,6.875554504699707,0.5399999995803832,1.8780000009155273,0.39021130325903075 -15,6.731235504150391,6.86877296798706,0.6040000000190735,1.991999997177124,0.39021130325903075 -16,6.722332000732422,6.881133583374023,0.5559999995803833,1.8939999996185304,0.3888752740474963 -16,6.726221084594727,6.878486652832032,0.5020000004577637,1.7700000036621093,0.3888752740474963 -17,6.706497669219971,6.885410415039063,0.6280000004577637,1.977999998474121,0.3874563978983783 -17,6.711371421813965,6.872441061401367,0.639999999294281,2.025999998321533,0.3874563978983783 -18,6.692277908325195,6.879464132843018,0.5399999995803832,1.8159999996185303,0.3859552971776503 -18,6.695293426513672,6.873998327941894,0.5379999995803832,1.9379999994659425,0.3859552971776503 -19,6.682459831237793,6.88767384765625,0.5780000004577637,1.8979999984741212,0.38437263031770014 -19,6.688074111938477,6.876273168640137,0.6499999997329712,1.9419999997711181,0.38437263031770014 -20,6.665689468383789,6.913064990539551,0.4499999998664856,1.6520000009155273,0.3827090915285202 -20,6.666996955871582,6.917340899658203,0.4919999999332428,1.6820000014877319,0.3827090915285202 -21,6.650964736938477,6.915457929382324,0.49400000030517577,1.7439999997711182,0.3809654104932039 -21,6.654986381530762,6.891571377868653,0.5539999997329712,1.8579999984741211,0.3809654104932039 -22,6.640132904052734,6.908824508666992,0.595999999294281,1.9959999996185303,0.3791423520478826 -22,6.6413679122924805,6.899364648132324,0.5900000003051757,2.0600000010681154,0.3791423520478826 -23,6.633153915405273,6.958054618835449,0.4300000000190735,1.698000000038147,0.37724071584624297 -23,6.633950233459473,6.935959024047851,0.4319999998664856,1.6360000006103517,0.37724071584624297 -24,6.6147871017456055,6.930119956817627,0.5299999990081787,1.893999998626709,0.3752613360087727 -24,6.616601943969727,6.925618092193604,0.4759999999332428,1.8340000036621094,0.3752613360087727 -25,6.602322101593018,6.926238089447022,0.5140000004577636,1.843999999771118,0.37320508075688774 -25,6.605362892150879,6.930243486175537,0.4880000000190735,1.8919999994659424,0.37320508075688774 -26,6.588551044464111,6.929627213439941,0.5599999998664856,1.9999999996185303,0.3710728520321014 -26,6.5905961990356445,6.926106202850342,0.5539999998664856,1.8639999981689452,0.3710728520321014 -27,6.576830863952637,6.9311284732055665,0.5779999990081787,2.011999998474121,0.368865585100403 -27,6.574466228485107,6.937102469940186,0.5559999995803833,2.0240000022125244,0.368865585100403 -28,6.56911563873291,6.939736550292968,0.5500000004577636,1.9100000049591064,0.3665842481420199 -28,6.568495273590088,6.931065178375244,0.537999999294281,1.9760000022125244,0.3665842481420199 -29,6.551608562469482,6.9594466165161135,0.489999999294281,1.7939999996185303,0.36422984182674084 -29,6.551344394683838,6.951459311981202,0.5839999995803833,1.9999999980163574,0.36422984182674084 -30,6.544393539428711,6.983676065063476,0.4799999990081787,1.8059999994659424,0.3618033988749895 
-30,6.543179512023926,6.949964854125977,0.5020000001525879,1.8959999993133545,0.3618033988749895 -31,6.526889801025391,6.976573746795654,0.555999999294281,1.8960000009155273,0.3593059836048393 -31,6.524758815765381,6.959248918762207,0.6179999995803833,2.1579999996185304,0.3593059836048393 -32,6.51254940032959,6.981352221679687,0.5079999990081787,1.9480000038146972,0.356738691465168 -32,6.506269931793213,6.96813433303833,0.5399999998664856,1.960000000038147,0.356738691465168 -33,6.49548864364624,6.987286891174317,0.487999999294281,1.852000001487732,0.3541026485551579 -33,6.490726947784424,6.983131244506836,0.5199999998664856,1.9000000020599366,0.3541026485551579 -34,6.486472129821777,7.000174746246338,0.5079999998664856,1.8820000023651122,0.3513990111303513 -34,6.480923175811768,6.975136740570068,0.5119999998664856,1.9680000007629395,0.3513990111303513 -35,6.4696760177612305,7.040946520996094,0.511999999294281,1.8599999994659424,0.3486289650954789 -35,6.465641975402832,7.020086326904297,0.5439999995803833,2.05599999961853,0.3486289650954789 -36,6.4527997970581055,7.01885198638916,0.5260000000190734,1.9239999996185302,0.34579372548428233 -36,6.445886611938477,6.978989710083008,0.5840000004577637,2.0679999994659424,0.34579372548428233 -37,6.446316242218018,7.01972318359375,0.4839999998664856,1.888000000038147,0.3428945359265607 -37,6.437939643859863,7.0065891966247555,0.5200000001525878,2.032000001335144,0.3428945359265607 -38,6.426208019256592,7.046766372985839,0.545999999294281,1.9140000006103515,0.33993266810267314 -38,6.417410850524902,7.0542836224365235,0.5039999995803833,1.9380000014877319,0.33993266810267314 -39,6.408820152282715,7.030670360412597,0.5479999998664856,1.9499999968719481,0.33690942118573775 -39,6.400130271911621,7.028005226440429,0.6140000000190735,2.153999998321533,0.33690942118573775 -40,6.39302921295166,7.040058102722168,0.5859999990081787,1.9539999996185302,0.3338261212717717 -40,6.381618022918701,7.023664332580567,0.589999999294281,2.048000002365112,0.3338261212717717 -41,6.379606246948242,7.06195273223877,0.5180000001525878,1.9440000035095215,0.3306841207980211 -41,6.3661017417907715,7.044037224731445,0.5559999995803833,2.15799999874115,0.3306841207980211 -42,6.36635160446167,7.090167502746582,0.5340000001525879,1.8699999970245362,0.32748479794973795 -42,6.35398530960083,7.056457656707764,0.5979999995803833,2.029999999771118,0.32748479794973795 -43,6.348472595214844,7.123469321594238,0.4659999995803833,1.8380000007629393,0.3242295560556621 -43,6.336571216583252,7.127492211151123,0.5419999995803833,1.9700000020599364,0.3242295560556621 -44,6.325023651123047,7.110256091308594,0.5319999998664856,1.7980000009155273,0.320919822972475 -44,6.312655925750732,7.12368181854248,0.5279999995803834,2.016000000610352,0.320919822972475 -45,6.308741569519043,7.077106558074951,0.4739999999666214,1.9160000010681153,0.31755705045849464 -45,6.300881385803223,7.070175020294189,0.5499999998664856,2.0740000009155275,0.31755705045849464 -46,6.296248435974121,7.148223882141114,0.4739999995803833,1.7839999997329712,0.3141427135368864 -46,6.279409408569336,7.110462235717773,0.6159999998664856,2.0400000020599367,0.3141427135368864 -47,6.277801036834717,7.154966416625976,0.5019999995803833,1.8619999983215332,0.31067830984866884 -47,6.266246795654297,7.106363173217773,0.5200000003051758,2.138000003662109,0.31067830984866884 -48,6.25830602645874,7.132344109344483,0.4900000001525879,1.8379999993133544,0.30716535899579933 
-48,6.241935729980469,7.111315277709961,0.5839999990081787,2.1160000022125245,0.30716535899579933 -49,6.237907409667969,7.184416811828613,0.6059999995803833,1.9619999994659423,0.3036054018746261 -49,6.223014831542969,7.148842007446289,0.6859999995803833,2.155999999923706,0.3036054018746261 -50,6.222013473510742,7.240679753112793,0.4939999999332428,1.742000000038147,0.30000000000000004 -50,6.206821441650391,7.2021551620483395,0.4920000000190735,1.940000000038147,0.30000000000000004 -51,6.204699516296387,7.222397875366211,0.5100000001525878,1.8620000023651122,0.2963507348203431 -51,6.186595916748047,7.180378330841064,0.6179999995803833,2.166000000038147,0.2963507348203431 -52,6.184700012207031,7.211927671203613,0.5459999995803833,1.8000000009155273,0.29265920702397236 -52,6.166909694671631,7.200252453308106,0.6160000003051758,2.1300000007629394,0.29265920702397236 -53,6.163850784301758,7.244989415435791,0.5300000001525879,1.923999999885559,0.2889270358369855 -53,6.148956298828125,7.217050355224609,0.5759999999332428,2.1340000006103517,0.2889270358369855 -54,6.133647918701172,7.2110090284729,0.5520000003051758,1.9820000022125244,0.28515585831301454 -54,6.116785526275635,7.218511594238281,0.5980000001525879,2.1920000009155274,0.28515585831301454 -55,6.124849319458008,7.263096175842285,0.5139999999332427,2.0100000009155274,0.28134732861516004 -55,6.103966236114502,7.237118853302002,0.5680000001525879,2.0800000006103514,0.28134732861516004 -56,6.108697891235352,7.27550312423706,0.5579999998664856,2.0100000023651123,0.2775031172904206 -56,6.0816826820373535,7.2774611859130856,0.6359999998664856,2.141999998321533,0.2775031172904206 -57,6.081491470336914,7.294188733520508,0.5079999998664856,1.8999999996185302,0.27362491053693566 -57,6.054277420043945,7.278627499237061,0.6059999998664856,2.0720000020599367,0.27362491053693566 -58,6.066103458404541,7.319130179901123,0.5239999995803833,1.8300000012207032,0.26971440946436304 -58,6.040961265563965,7.289313296051025,0.6180000003051758,2.148000001373291,0.26971440946436304 -59,6.030848503112793,7.330989106445313,0.6119999995803833,1.9559999994659423,0.26577332934771664 -59,6.0001702308654785,7.321165827178955,0.6159999995803833,2.2880000036621095,0.26577332934771664 -60,6.0048723220825195,7.350476207122803,0.4939999999332428,2.033999998626709,0.2618033988749895 -60,5.973123550415039,7.328089238891602,0.6299999997329712,2.2060000010681153,0.2618033988749895 -61,5.980533599853516,7.326227387390137,0.5220000003051758,1.9199999996185302,0.25780635938889435 -61,5.958221435546875,7.279663868103027,0.5780000000190735,2.1420000038146974,0.25780635938889435 +0,6.942429065704346,6.9204323419189455,0.104,0.5859999998664855,1e-05 +1,6.923966407775879,6.9362698457336425,0.1179999998664856,0.4979999990081787,0.080008 +2,6.913142204284668,6.952643440551758,0.1499999999666214,0.6420000001525878,0.160006 +3,6.911567687988281,6.920265467529297,0.2219999999666214,0.9499999993133544,0.240004 +4,6.90109395980835,6.903558749847412,0.28600000015258786,1.1040000006103516,0.320002 +5,6.893125057220459,6.895214747467041,0.4160000000190735,1.3660000007629394,0.39890437907365467 +6,6.873154640197754,6.880936201782227,0.3179999998664856,1.3300000011825561,0.3984229402628956 +7,6.852287769317627,6.8721664320373534,0.3999999995803833,1.4799999993133546,0.3978544665925977 +8,6.831615447998047,6.864757053833007,0.501999999294281,1.6720000023651123,0.39719920741410103 +9,6.816365718841553,6.8707130639648435,0.501999999294281,1.6100000023651122,0.3964574501457378 
+10,6.803730010986328,6.866861535186768,0.45599999929428103,1.7320000010681151,0.39562952014676117 +11,6.784073829650879,6.874440485076904,0.4919999995803833,1.61199999874115,0.3947157805746321 +12,6.767500877380371,6.867647185668945,0.4780000000190735,1.8540000009155273,0.3937166322257262 +13,6.758795261383057,6.8665748153686526,0.5939999995803833,1.8480000022125245,0.39263251335953164 +14,6.741405487060547,6.876222854309082,0.4819999998664856,1.7439999994659423,0.39146389950641347 +15,6.724390029907227,6.879548714904785,0.5759999990081787,1.9560000036621095,0.39021130325903075 +16,6.720141410827637,6.885081752624512,0.5560000000190735,1.8020000010681152,0.3888752740474963 +17,6.704622268676758,6.889008323059082,0.6379999995803833,1.9500000009155274,0.3874563978983783 +18,6.6893815994262695,6.882255678863525,0.5620000001525879,1.9139999994659425,0.3859552971776503 +19,6.68095064163208,6.889276123657226,0.5499999998664856,1.8120000038146973,0.38437263031770014 +20,6.66105842590332,6.9256472277832035,0.4559999999332428,1.6200000007629394,0.3827090915285202 +21,6.646844863891602,6.910552183380127,0.5380000000190734,1.7760000009155275,0.3809654104932039 +22,6.635743141174316,6.907131293029785,0.5659999998664856,1.9679999996185302,0.3791423520478826 +23,6.6296820640563965,6.956730718688965,0.4319999998664856,1.6659999994659425,0.37724071584624297 +24,6.611444473266602,6.94983899810791,0.4699999995803833,1.7820000009155272,0.3752613360087727 +25,6.598127365112305,6.938770271911621,0.4920000000190735,1.7940000009155272,0.37320508075688774 +26,6.586238384246826,6.935309044494629,0.5539999998664856,1.9259999996185302,0.3710728520321014 +27,6.570889949798584,6.936730230560303,0.5959999995803833,1.9640000009155274,0.368865585100403 +28,6.5616774559021,6.946173254699707,0.5799999998664856,1.9379999996185302,0.3665842481420199 +29,6.546475410461426,6.967161661834717,0.5160000001525878,1.7759999994659423,0.36422984182674084 +30,6.539191246032715,6.994973771057129,0.5259999995803833,1.8019999993133544,0.3618033988749895 +31,6.519817352294922,6.982266851348877,0.5579999995803833,1.9060000009155273,0.3593059836048393 +32,6.504195213317871,6.982266543426514,0.47800000030517575,1.7660000009155274,0.356738691465168 +33,6.486875057220459,7.014854125976562,0.513999999294281,1.8100000009155273,0.3541026485551579 +34,6.4781341552734375,6.998830995788574,0.5199999999332428,1.769999998588562,0.3513990111303513 +35,6.461017608642578,7.034900197753906,0.5459999995803833,1.8620000023651122,0.3486289650954789 +36,6.444275379180908,7.013399291229248,0.5359999995803832,1.9120000007629394,0.34579372548428233 +37,6.433405876159668,7.047976176147461,0.501999999294281,1.7959999998855591,0.3428945359265607 +38,6.414316177368164,7.0445391026306154,0.4860000001525879,1.8820000020599366,0.33993266810267314 +39,6.398417949676514,7.047067463378906,0.5679999987220764,2.0300000012207033,0.33690942118573775 +40,6.379634857177734,7.057349690856934,0.5539999990081788,1.9120000022125243,0.3338261212717717 +41,6.36372184753418,7.092856681518555,0.4659999998664856,1.8100000036621093,0.3306841207980211 +42,6.350620269775391,7.07941443359375,0.5439999995803833,1.8820000010681153,0.32748479794973795 +43,6.331295967102051,7.181488526611328,0.481999999294281,1.7760000009155275,0.3242295560556621 +44,6.310930252075195,7.115806886291504,0.5099999998664856,1.7980000007629395,0.320919822972475 +45,6.293801307678223,7.127949407653809,0.4679999999332428,1.8420000007629393,0.31755705045849464 
+46,6.28037691116333,7.143219479827881,0.5780000000190735,1.7720000006103516,0.3141427135368864 +47,6.261784553527832,7.136406854095459,0.4499999995803833,1.8519999970245362,0.31067830984866884 +48,6.237654685974121,7.146573718566895,0.5220000001525879,1.9220000020599366,0.30716535899579933 +49,6.2231669425964355,7.170018084716797,0.557999999294281,1.9460000007629394,0.3036054018746261 +50,6.206787109375,7.232628859558106,0.4420000001525879,1.7620000022125244,0.30000000000000004 +51,6.1882219314575195,7.208994526672363,0.5480000001525879,1.9820000007629395,0.2963507348203431 +52,6.1664276123046875,7.213443625488281,0.5300000001525879,1.8580000006103516,0.29265920702397236 +53,6.143184661865234,7.294562321472168,0.4979999999332428,1.8799999993133545,0.2889270358369855 +54,6.121213912963867,7.273409919128418,0.5119999995803833,1.8939999983215332,0.28515585831301454 +55,6.105893611907959,7.3219014178466795,0.4939999998664856,1.8620000036621094,0.28134732861516004 +56,6.086548328399658,7.293889065856933,0.5139999998664856,1.8939999981689453,0.2775031172904206 +57,6.0603485107421875,7.289929358978272,0.5100000001525878,1.9860000020599364,0.27362491053693566 +58,6.043700218200684,7.345000336608886,0.5120000001525878,1.8919999993133545,0.26971440946436304 +59,6.011569499969482,7.3328267753601075,0.4900000001525879,2.018000001487732,0.26577332934771664 +60,5.989280700683594,7.411293219299316,0.5179999995803833,1.9379999983215332,0.2618033988749895 +61,5.968611240386963,7.296598130645752,0.5340000003051758,1.9340000007629394,0.25780635938889435 +62,5.950004577636719,7.345550286102295,0.5079999995803833,1.978000001373291,0.25378396412305315 +63,5.9150309562683105,7.393911924591064,0.5600000001525879,2.0739999980163573,0.24973797743297094 +64,5.893589973449707,7.445621807098389,0.5339999995803834,2.0740000023651124,0.24567017402213118 +65,5.865739345550537,7.4768023846435545,0.4779999999332428,1.8280000009155273,0.2415823381635519 +66,5.839940071105957,7.555699309539795,0.5680000000190735,1.9560000007629395,0.23747626291714496 +67,5.810744762420654,7.518662259521484,0.5200000001525878,1.9900000020599364,0.23335374934322048 +68,5.810274600982666,7.535615730285644,0.5340000000190734,1.9380000014877319,0.22921660571248237 +69,5.748648643493652,7.519405028686523,0.4999999999332428,1.9979999981689454,0.22506664671286086 +70,5.727104187011719,7.536212543640136,0.5799999990081787,1.9539999981689453,0.22090569265353077 +71,5.705385208129883,7.639699801483154,0.4699999999666214,1.873999999885559,0.21673556866646312 +72,5.678601264953613,7.559548147125244,0.504000000152588,1.9299999993133545,0.21255810390586272 +73,5.637502193450928,7.687355150299072,0.551999999294281,2.0479999987411497,0.20837513074583996 +74,5.604036331176758,7.665780875854492,0.5319999999332428,1.9979999980163574,0.20418848397667141 +75,5.577176094055176,7.750660203552246,0.46599999900817873,1.8880000006103517,0.2 +76,5.542241096496582,7.715435692749024,0.49400000030517577,1.8840000007629394,0.1958115160233287 +77,5.516667366027832,7.849578760070801,0.5479999998664856,1.9899999994659423,0.19162486925416009 +78,5.4770097732543945,7.812740891418457,0.5459999998664856,1.9680000010681151,0.1874418960941374 +79,5.444944381713867,7.819992991485596,0.5580000003051758,1.9900000010681151,0.1832644313335369 +80,5.403650760650635,7.837033017272949,0.5839999997329712,1.9539999981689453,0.17909430734646936 +81,5.374050617218018,7.912339072570801,0.5719999997329712,2.0260000009155275,0.1749333532871392 
+82,5.329314231872559,7.862462864990234,0.5940000011825561,2.0459999981689454,0.17078339428751776 +83,5.295529365539551,7.927786674499512,0.5700000005912781,1.9960000036621093,0.16664625065677952 +84,5.2421345710754395,8.003273767395019,0.5759999998664856,2.057999999771118,0.16252373708285509 +85,5.206167221069336,8.030916482696533,0.6300000003051758,2.0500000020599365,0.15841766183644812 +86,5.161040306091309,7.990317549285889,0.6059999995803833,2.1880000007629397,0.1543298259778689 +87,5.1082844734191895,8.235559090576173,0.5319999998664856,1.9699999981689453,0.15026202256702909 +88,5.082700729370117,8.092807864379882,0.5119999995803833,2.0500000009155275,0.14621603587694693 +89,5.0340070724487305,8.139680226135255,0.5379999990081787,1.8779999983215332,0.14219364061110565 +90,5.002180099487305,8.170847958068848,0.5759999990081787,1.9720000007629395,0.13819660112501053 +91,4.960944652557373,8.27602088470459,0.5320000003051758,2.082000001220703,0.13422667065228336 +92,4.917640686035156,8.260481964721679,0.5339999998664856,1.9640000020599366,0.130285590535637 +93,4.843186378479004,8.279635296936036,0.6079999995803833,2.016000002365112,0.12637508946306447 +94,4.8030877113342285,8.340834577941894,0.5579999998664856,1.9660000038146972,0.12249688270957942 +95,4.740231037139893,8.390814448242187,0.5079999998664856,1.9779999996185302,0.11865267138483995 +96,4.696559906005859,8.47701716583252,0.4980000001525879,1.9739999996185302,0.11484414168698548 +97,4.644405841827393,8.394728535461425,0.5759999995803833,2.0900000009155275,0.11107296416301456 +98,4.586411476135254,8.534123056640626,0.503999999294281,1.9259999996185302,0.10734079297602771 +99,4.547860145568848,8.511308373413087,0.5659999999332428,2.012000000915527,0.10364926517965692 +100,4.497271537780762,8.506303272094726,0.5599999995803833,2.0159999983215333,0.09999999999999996 +101,4.42962121963501,8.551114705200195,0.5760000000190735,1.9879999997711182,0.09639459812537399 +102,4.382665634155273,8.661531132507324,0.5760000001525879,2.1499999994659422,0.09283464100420072 +103,4.338657855987549,8.54982807647705,0.5379999995803832,2.048000002365112,0.0893216901513312 +104,4.277405261993408,8.986014337463379,0.5359999999666214,2.0160000009155272,0.08585728646311369 +105,4.214815616607666,8.762382994384765,0.5739999998664856,1.9979999996185303,0.0824429495415054 +106,4.1493000984191895,8.745830765686035,0.5580000001525879,1.9180000007629394,0.07908017702752504 +107,4.11440896987915,8.912276128845216,0.545999999294281,1.9679999968719482,0.07577044394433795 +108,4.035670280456543,8.875556287841796,0.6080000001525879,1.9999999994659423,0.07251520205026206 +109,4.011588096618652,8.795275288696288,0.5920000003051757,2.160000001068115,0.06931587920197897 +110,3.9391818046569824,8.872440106811524,0.5260000000190734,2.0060000023651123,0.06617387872822836 +111,3.8946266174316406,8.965825346069336,0.5640000001525879,1.998000000038147,0.06309057881426226 +112,3.847123146057129,8.833626903686524,0.5839999998664855,2.1119999983215334,0.060067331897326895 +113,3.781787395477295,8.948263769226074,0.6139999995803833,2.1079999987411497,0.057105464073439374 +114,3.742607355117798,9.017952476196289,0.5660000001525879,2.0440000009155272,0.05420627451571774 +115,3.6985316276550293,9.044272632446289,0.5680000003051758,1.9839999981689453,0.051371034904521135 +116,3.659961700439453,9.003668550415039,0.5459999995803833,2.0960000009155273,0.04860098886964875 +117,3.5988922119140625,8.977717391967774,0.6479999995803833,2.0599999994659424,0.04589735144484217 
+118,3.527200222015381,8.984568173522948,0.5679999999332428,2.083999998168945,0.04326130853483206 +119,3.4911253452301025,9.098649426879883,0.6159999999332428,2.1739999994659422,0.04069401639516075 +120,3.4388296604156494,9.024455751953125,0.6339999995803833,2.032000001220703,0.03819660112501053 +121,3.3956265449523926,9.004373880615235,0.5699999999332428,2.10799999961853,0.035770158173259195 +122,3.370452404022217,9.07261521057129,0.5699999995803833,2.1100000010681152,0.03341575185798012 +123,3.3147196769714355,9.037646295471191,0.6299999998664856,2.0739999981689454,0.031134414899596986 +124,3.280813217163086,9.013615391845702,0.6179999995803833,2.0080000010681154,0.02892714796789868 +125,3.2557082176208496,9.113414707336426,0.6179999999332428,2.1200000022125245,0.026794919243112305 +126,3.1890621185302734,9.104899918823243,0.6359999999332427,2.0439999997711182,0.024738663991227285 +127,3.1820688247680664,9.122456697998047,0.5959999998664856,2.074000001220703,0.022759284153757053 +128,3.1438777446746826,9.017142121582031,0.5959999998664856,2.1439999997711183,0.020857647952117465 +129,3.1235992908477783,9.076268911743163,0.5939999999332428,2.1400000036621094,0.01903458950679613 +130,3.085062026977539,9.11533298400879,0.6099999998664856,2.1539999999237063,0.01729090847147985 +131,3.0774424076080322,9.043501282348632,0.5879999998664855,2.08399999961853,0.015627369682299875 +132,3.037274122238159,9.064817224731446,0.6119999998664856,2.1260000010681153,0.014044702822349731 +133,2.993844747543335,9.032037932434083,0.6219999995803833,2.101999999771118,0.01254360210162171 +134,2.9893879890441895,9.064460057678223,0.6039999998664856,2.0940000025177,0.0111247259525038 +135,2.961143970489502,9.09548891571045,0.6399999999666214,2.053999999771118,0.009788696740969294 +136,2.9593677520751953,9.10189620941162,0.6139999998664856,2.09999999961853,0.008536100493586551 +137,2.918013095855713,9.065079687194824,0.5919999998664855,2.1279999997711183,0.007367486640468379 +138,2.911527156829834,9.076765984191894,0.5940000001525879,2.07599999961853,0.0062833677742737855 +139,2.907723903656006,9.085828477783203,0.6339999995803833,2.1440000025177004,0.005284219425367942 +140,2.882197141647339,9.059706039733888,0.6199999995803833,2.133999997024536,0.004370479853238885 +141,2.8683888912200928,9.09216304321289,0.6459999998664856,2.109999998321533,0.003542549854262278 +142,2.8495898246765137,9.062279708251953,0.6320000001525878,2.1300000025177,0.0028007925858990037 +143,2.8448829650878906,9.076571819458009,0.6199999999332428,2.109999998321533,0.0021455334074023335 +144,2.8603718280792236,9.076863250732423,0.6080000001525879,2.139999998474121,0.0015770597371044472 +145,2.8671438694000244,9.076548695373535,0.6119999998664856,2.137999998321533,0.001095620926345342 +146,2.8295481204986572,9.058175260009765,0.6180000001525879,2.135999998474121,0.000701428150099126 +147,2.825033664703369,9.073507239990235,0.6299999999332428,2.123999998474121,0.00039465431434568824 +148,2.834763765335083,9.061160872192383,0.6239999999332428,2.137999999771118,0.00017543398022832336 +149,2.8274147510528564,9.07491242126465,0.6199999998664856,2.124000001068115,4.3863305030900085e-05 diff --git a/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc index 50ce7c4f01d666aef190d10c206f74276d30e46e..cad29565e61d35f5a32366248149eca0a1d07b99 100644 Binary files a/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc and 
b/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/__pycache__/version.cpython-39.pyc b/pytorch-image-models/timm/__pycache__/version.cpython-39.pyc index 3c621c33d8846925d3f3cefb8ba4e6956ba6cd5f..ba3f3e50404b267100fe8e54439b48416f81b3b8 100644 Binary files a/pytorch-image-models/timm/__pycache__/version.cpython-39.pyc and b/pytorch-image-models/timm/__pycache__/version.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/__init__.cpython-39.pyc index 21c1a1d0e7f2f6562bfb9fb3cb21186c694ebcc4..e39733502f7943be01f2d5245c2865ee4029065d 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/__init__.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/auto_augment.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/auto_augment.cpython-39.pyc index a5166fa1ecc583baf69604961378fb14c76eca3f..f34806705c2372ae66824b206003592080343772 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/auto_augment.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/auto_augment.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/config.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/config.cpython-39.pyc index 15a5d34ae96e3a3fa9b88e9676e6854d8b6d2ba7..026f9b78a7857db1c4fa98385a91dd7008ef88b3 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/config.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/config.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/constants.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/constants.cpython-39.pyc index dfd6954498f13cd1419dfcade03491febc1db587..8feec47c5bb85690d3c0d537aca7ac98fa6e891c 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/constants.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/constants.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/dataset.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/dataset.cpython-39.pyc index 49b344a38fc7297eedf16ddc78441b1f39e0fa4e..ccce1641a33e2438c7cc1bb0e8e0087beaebb959 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/dataset.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/dataset.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/dataset_factory.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/dataset_factory.cpython-39.pyc index 218e395eb997ae90a0693a02150f2455bce99b98..a846adf8c75a68d62947e73a179126c697d9589e 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/dataset_factory.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/dataset_factory.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/dataset_info.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/dataset_info.cpython-39.pyc index cb16cee3ccfa57ee3064cd99c97525e7ac12f5f2..5806955902923f2085f001fa510057899ab27120 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/dataset_info.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/dataset_info.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/distributed_sampler.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/distributed_sampler.cpython-39.pyc index 
b8d361dbb141b210f6ca9c127b01c58d43892956..18cc9ea59d009af56e464eeed710db1e123f9241 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/distributed_sampler.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/distributed_sampler.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/imagenet_info.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/imagenet_info.cpython-39.pyc index 8f2e2cf725746edad79faca1b72271d27178216b..ee5704af7572dafb5f7aa40a1c6123c28efbe48c 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/imagenet_info.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/imagenet_info.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/loader.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/loader.cpython-39.pyc index d351287df6a604370e02571ecc9c96355318d14c..f3de2a65c02b8a4f35bfede9d090bbb277f91043 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/loader.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/loader.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/mixup.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/mixup.cpython-39.pyc index 0cdd29fd0a6f03a8b9f1acba1a86f187486aa9ce..bc55229e112bd76aea2454ae3ef2fa82a970b4d3 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/mixup.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/mixup.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/random_erasing.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/random_erasing.cpython-39.pyc index 221cc53c0d48744ab16e55f05b99587ac076f7f2..5f5dfc180b9067b2df1d096acb2b6484a3761691 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/random_erasing.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/random_erasing.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/real_labels.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/real_labels.cpython-39.pyc index 11b11ddadab1d7654334eac79a6c7ccc1989b86b..267843edb71761afdef2e1e88f59cecc56795b55 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/real_labels.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/real_labels.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/transforms.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/transforms.cpython-39.pyc index a635a9984df3393db339252122f76ccf70050c30..d214cf16469b89c43727149abd8b36a0c1d3ec89 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/transforms.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/transforms.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/__pycache__/transforms_factory.cpython-39.pyc b/pytorch-image-models/timm/data/__pycache__/transforms_factory.cpython-39.pyc index f76baecc193bd98f25cae15ac0211ef74c414cb5..4b3bd75323482b68d64f4460f8fff05ee3021a84 100644 Binary files a/pytorch-image-models/timm/data/__pycache__/transforms_factory.cpython-39.pyc and b/pytorch-image-models/timm/data/__pycache__/transforms_factory.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/__init__.cpython-39.pyc index d55b0f00d343bfcad9555ad6980e59faf4767722..311d76f02ff1968a9b1fc5cff699830d58279ba7 100644 Binary files 
a/pytorch-image-models/timm/data/readers/__pycache__/__init__.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/class_map.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/class_map.cpython-39.pyc index 2a51ecf0e2547eef7c62b3269e18cbb31ee4bfa8..a73d867eb32429a0aac6f3e727c375fdfb53a0ab 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/class_map.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/class_map.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/img_extensions.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/img_extensions.cpython-39.pyc index 6a1feaa0d415693dfa4319106966e2f691fd3447..485127ba8f0a5c85a74347b562e6aa1fac3d50fc 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/img_extensions.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/img_extensions.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/reader.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/reader.cpython-39.pyc index ce3e5aeac7cd888e688b7a3170881c774b90db33..13013443d999d9a4d5f83a917bf572725b9925a8 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/reader.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/reader.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/reader_factory.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/reader_factory.cpython-39.pyc index 41652ddc023fdfe13ad3c6235a61c1b4923614a5..f73b90b1eea082c502790a98ba481904987e433a 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/reader_factory.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/reader_factory.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/reader_hfds.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/reader_hfds.cpython-39.pyc index 69af7ed097b40f2de8ddadb54340f846bbacaa1a..8df65abc87fe6aa3aa2cbfdc2aeef0699b6b60ce 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/reader_hfds.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/reader_hfds.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/reader_image_folder.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/reader_image_folder.cpython-39.pyc index 9842e0ec987889503907e836df3df8feb09638cc..5fe1d6a7b18a397420d2811100359ca0d0b54a12 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/reader_image_folder.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/reader_image_folder.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/data/readers/__pycache__/reader_image_in_tar.cpython-39.pyc b/pytorch-image-models/timm/data/readers/__pycache__/reader_image_in_tar.cpython-39.pyc index 5fa9b32753ab2421a73113b5c46e52000d83234c..24c39ea1e0394dae4968315f6107f3ab2d26a021 100644 Binary files a/pytorch-image-models/timm/data/readers/__pycache__/reader_image_in_tar.cpython-39.pyc and b/pytorch-image-models/timm/data/readers/__pycache__/reader_image_in_tar.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/layers/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/layers/__pycache__/__init__.cpython-39.pyc index 
7441b6cdf956b12e66f46afc28f760d9c771bb99..06c24e3edf25f734c5564356630edb72ef15972d 100644 Binary files a/pytorch-image-models/timm/layers/__pycache__/__init__.cpython-39.pyc and b/pytorch-image-models/timm/layers/__pycache__/__init__.cpython-39.pyc differ
[Binary diffs for the remaining pytorch-image-models/timm/{layers,loss,models}/__pycache__/*.cpython-39.pyc bytecode caches omitted.]
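The only non-binary change in this stretch of the diff is the vgg.py hunk below, which replaces ConvMlp's Optional[...] = None layer defaults with concrete nn.ReLU / nn.Conv2d classes. As a rough illustration of why that matters (a minimal sketch, not timm's actual ConvMlp; the TinyConvMlp name and the kernel_size=1 choice are invented here), a None default leaves the module unusable unless every caller passes both layer classes, because None is not callable:

import torch
import torch.nn as nn
from typing import Type

class TinyConvMlp(nn.Module):
    # Hypothetical stand-in for ConvMlp, reduced to the two parameters the hunk touches.
    def __init__(
            self,
            in_features: int = 512,
            out_features: int = 4096,
            act_layer: Type[nn.Module] = nn.ReLU,     # was Optional[Type[nn.Module]] = None
            conv_layer: Type[nn.Module] = nn.Conv2d,  # was Optional[Type[nn.Module]] = None
    ):
        super().__init__()
        # With None defaults, conv_layer(...) / act_layer(...) would raise
        # "TypeError: 'NoneType' object is not callable" unless the caller
        # always supplied concrete layer classes.
        self.fc = conv_layer(in_features, out_features, kernel_size=1)
        self.act = act_layer(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.fc(x))

print(TinyConvMlp()(torch.randn(1, 512, 7, 7)).shape)  # torch.Size([1, 4096, 7, 7])

With the concrete defaults from the hunk, the module constructs and runs without any extra arguments.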
diff --git a/pytorch-image-models/timm/models/vgg.py b/pytorch-image-models/timm/models/vgg.py
index c096df23fb5fcd049ed350ec0de5ff3ab642d8a1..a4cfbffdff7c9a0faed0df2bc8e01a71bbe349f5 100644
--- a/pytorch-image-models/timm/models/vgg.py
+++ b/pytorch-image-models/timm/models/vgg.py
@@ -38,8 +38,8 @@ class ConvMlp(nn.Module):
             kernel_size=7,
             mlp_ratio=1.0,
             drop_rate: float = 0.2,
-            act_layer: Optional[Type[nn.Module]] = None,
-            conv_layer: Optional[Type[nn.Module]] = None,
+            act_layer: Type[nn.Module] = nn.ReLU,
+            conv_layer: Type[nn.Module] = nn.Conv2d,
     ):
         super(ConvMlp, self).__init__()
         self.input_kernel_size = kernel_size
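The vgg.py hunk above is the only source-code change in this stretch of the diff: ConvMlp's act_layer and conv_layer arguments now default to nn.ReLU and nn.Conv2d instead of Optional[...] = None. The sketch below is not the actual timm class (the class name, feature sizes, and forward body are illustrative); it only mirrors the changed signature to show that the head can now be constructed without the caller passing layer classes explicitly.

```python
# Illustrative sketch only, not timm's ConvMlp; it mirrors the new defaults above.
from typing import Type

import torch
import torch.nn as nn


class ConvMlpSketch(nn.Module):
    def __init__(
            self,
            in_features: int,
            out_features: int,
            kernel_size: int = 7,
            mlp_ratio: float = 1.0,
            drop_rate: float = 0.2,
            act_layer: Type[nn.Module] = nn.ReLU,     # was Optional[...] = None
            conv_layer: Type[nn.Module] = nn.Conv2d,  # was Optional[...] = None
    ):
        super().__init__()
        mid_features = int(out_features * mlp_ratio)
        self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True)
        self.act1 = act_layer(inplace=True)
        self.drop = nn.Dropout(drop_rate)
        self.fc2 = conv_layer(mid_features, out_features, 1, bias=True)
        self.act2 = act_layer(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act2(self.fc2(self.drop(self.act1(self.fc1(x)))))


# With the new defaults, no act_layer/conv_layer arguments are required:
head = ConvMlpSketch(512, 4096)
print(head(torch.randn(1, 512, 7, 7)).shape)  # torch.Size([1, 4096, 1, 1])
```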
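The W&B log diffs that follow record a fresh run (7ozba6oj) whose config now carries an explicit wandb_project, where the previous run (2lmh0nm7) had wandb_project: None. As a hedged sketch only, the values visible in the debug.log config dump map onto a wandb.init() call roughly as shown below; the exact call site inside timm's train.py may differ.

```python
# Hedged sketch: mirrors the run config shown in the debug.log diff below.
import wandb

experiment = "ImageNetTraining80.0-frac-1over64"

run = wandb.init(
    project=experiment,   # 'wandb_project' is now set instead of None
    name=experiment,      # 'experiment' from the training args
    mode="offline",       # drop this to actually sync the run
    config={
        "model": "seresnet34",
        "dataset": "hfds/datacomp/imagenet-1k-random-80.0-frac-1over64",
        "opt": "sgd",
        "sched": "cosine",
        "epochs": 150,
        "warmup_epochs": 5,
        "lr": 0.4,
        "batch_size": 256,
        "reprob": 0.5,
        "remode": "pixel",
    },
)
run.finish()
```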
diff --git a/pytorch-image-models/wandb/debug-internal.log b/pytorch-image-models/wandb/debug-internal.log
index 5dcca87dbf75d2a914acd51d77de3d0d94278034..f686e2d233941a66aa3b12a8966fc028eb0cdccb 100644
--- a/pytorch-image-models/wandb/debug-internal.log
+++ b/pytorch-image-models/wandb/debug-internal.log
@@ -1,7 +1,15 @@
-{"time":"2025-01-15T02:34:20.04157448Z","level":"INFO","msg":"stream: starting","core version":"0.19.3","symlink path":"/app/pytorch-image-models/wandb/run-20250115_023419-2lmh0nm7/logs/debug-core.log"}
-{"time":"2025-01-15T02:34:20.145304132Z","level":"INFO","msg":"created new stream","id":"2lmh0nm7"}
-{"time":"2025-01-15T02:34:20.145355413Z","level":"INFO","msg":"stream: started","id":"2lmh0nm7"}
-{"time":"2025-01-15T02:34:20.146572237Z","level":"INFO","msg":"handler: started","stream_id":"2lmh0nm7"}
-{"time":"2025-01-15T02:34:20.146611428Z","level":"INFO","msg":"writer: Do: started","stream_id":"2lmh0nm7"}
-{"time":"2025-01-15T02:34:20.146810552Z","level":"INFO","msg":"sender: started","stream_id":"2lmh0nm7"}
-{"time":"2025-01-15T02:34:20.284635278Z","level":"INFO","msg":"Starting system monitor"}
+{"time":"2025-01-19T00:29:40.777529817Z","level":"INFO","msg":"stream: starting","core version":"0.19.3","symlink path":"/app/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-core.log"}
+{"time":"2025-01-19T00:29:40.883448579Z","level":"INFO","msg":"created new stream","id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883483601Z","level":"INFO","msg":"stream: started","id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883535802Z","level":"INFO","msg":"writer: Do: started","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883571511Z","level":"INFO","msg":"handler: started","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883555421Z","level":"INFO","msg":"sender: started","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:41.01753464Z","level":"INFO","msg":"Starting system monitor"}
+{"time":"2025-01-19T01:32:13.86031313Z","level":"INFO","msg":"stream: closing","id":"7ozba6oj"}
+{"time":"2025-01-19T01:32:13.86035543Z","level":"INFO","msg":"Stopping system monitor"}
+{"time":"2025-01-19T01:32:13.861082206Z","level":"INFO","msg":"Stopped system monitor"}
+{"time":"2025-01-19T01:32:14.161833213Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"} +{"time":"2025-01-19T01:32:14.255169742Z","level":"INFO","msg":"handler: closed","stream_id":"7ozba6oj"} +{"time":"2025-01-19T01:32:14.255211143Z","level":"INFO","msg":"writer: Close: closed","stream_id":"7ozba6oj"} +{"time":"2025-01-19T01:32:14.255264754Z","level":"INFO","msg":"sender: closed","stream_id":"7ozba6oj"} +{"time":"2025-01-19T01:32:14.255369426Z","level":"INFO","msg":"stream: closed","id":"7ozba6oj"} diff --git a/pytorch-image-models/wandb/debug.log b/pytorch-image-models/wandb/debug.log index 72b542acbbf7df484b1de14356ac176c6b25f0c3..4a7469999d35702737cd0fa8f023882f9703efea 100644 --- a/pytorch-image-models/wandb/debug.log +++ b/pytorch-image-models/wandb/debug.log @@ -1,22 +1,23 @@ -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_setup.py:_flush():68] Current SDK version is 0.19.3 -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_setup.py:_flush():68] Configure stats pid to 196 -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_setup.py:_flush():68] Loading settings from /home/user/.config/wandb/settings -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_setup.py:_flush():68] Loading settings from /app/pytorch-image-models/wandb/settings -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_setup.py:_flush():68] Loading settings from environment variables -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_init.py:_log_setup():598] Logging user logs to /app/pytorch-image-models/wandb/run-20250115_023419-2lmh0nm7/logs/debug.log -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_init.py:_log_setup():599] Logging internal logs to /app/pytorch-image-models/wandb/run-20250115_023419-2lmh0nm7/logs/debug-internal.log -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_init.py:init():714] calling init triggers -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_init.py:init():719] wandb.init called with sweep_config: {} -config: {'data': None, 'data_dir': None, 'dataset': 'hfds/datacomp/imagenet-1k-random-80.0-frac-1over64', 'train_split': 'train', 'val_split': 'validation', 'train_num_samples': None, 'val_num_samples': None, 'dataset_download': False, 'class_map': '', 'input_img_mode': None, 'input_key': None, 'target_key': None, 'dataset_trust_remote_code': False, 'model': 'seresnet34', 'pretrained': False, 'pretrained_path': None, 'initial_checkpoint': '', 'resume': '', 'no_resume_opt': False, 'num_classes': 1000, 'gp': None, 'img_size': None, 'in_chans': None, 'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '', 'batch_size': 256, 'validation_batch_size': None, 'channels_last': False, 'fuser': '', 'grad_accum_steps': 1, 'grad_checkpointing': False, 'fast_norm': False, 'model_kwargs': {}, 'head_init_scale': None, 'head_init_bias': None, 'torchcompile_mode': None, 'torchscript': False, 'torchcompile': None, 'device': 'cuda:0', 'amp': True, 'amp_dtype': 'float16', 'amp_impl': 'native', 'model_dtype': None, 'no_ddp_bb': False, 'synchronize_step': False, 'local_rank': 0, 'device_modules': None, 'opt': 'sgd', 'opt_eps': None, 'opt_betas': None, 'momentum': 0.9, 'weight_decay': 2e-05, 'clip_grad': None, 'clip_mode': 'norm', 'layer_decay': None, 'opt_kwargs': {}, 'sched': 'cosine', 'sched_on_updates': False, 'lr': 0.4, 'lr_base': 0.1, 'lr_base_size': 256, 'lr_base_scale': '', 'lr_noise': None, 'lr_noise_pct': 0.67, 'lr_noise_std': 1.0, 'lr_cycle_mul': 1.0, 'lr_cycle_decay': 0.5, 'lr_cycle_limit': 1, 'lr_k_decay': 1.0, 'warmup_lr': 
1e-05, 'min_lr': 0, 'epochs': 150, 'epoch_repeats': 0.0, 'start_epoch': None, 'decay_milestones': [90, 180, 270], 'decay_epochs': 90, 'warmup_epochs': 5, 'warmup_prefix': False, 'cooldown_epochs': 0, 'patience_epochs': 10, 'decay_rate': 0.1, 'no_aug': False, 'train_crop_mode': None, 'scale': [0.08, 1.0], 'ratio': [0.75, 1.3333333333333333], 'hflip': 0.5, 'vflip': 0.0, 'color_jitter': 0.4, 'color_jitter_prob': None, 'grayscale_prob': None, 'gaussian_blur_prob': None, 'aa': None, 'aug_repeats': 0, 'aug_splits': 0, 'jsd_loss': False, 'bce_loss': False, 'bce_sum': False, 'bce_target_thresh': None, 'bce_pos_weight': None, 'reprob': 0.5, 'remode': 'pixel', 'recount': 1, 'resplit': False, 'mixup': 0.0, 'cutmix': 0.0, 'cutmix_minmax': None, 'mixup_prob': 1.0, 'mixup_switch_prob': 0.5, 'mixup_mode': 'batch', 'mixup_off_epoch': 0, 'smoothing': 0.1, 'train_interpolation': 'random', 'drop': 0.0, 'drop_connect': None, 'drop_path': None, 'drop_block': None, 'bn_momentum': None, 'bn_eps': None, 'sync_bn': False, 'dist_bn': 'reduce', 'split_bn': False, 'model_ema': False, 'model_ema_force_cpu': False, 'model_ema_decay': 0.9998, 'model_ema_warmup': False, 'seed': 42, 'worker_seeding': 'all', 'log_interval': 50, 'recovery_interval': 0, 'checkpoint_hist': 10, 'workers': 4, 'save_images': False, 'pin_mem': False, 'no_prefetcher': False, 'output': '', 'experiment': 'ImageNetTraining80.0-frac-1over64', 'eval_metric': 'top1', 'tta': 0, 'use_multi_epochs_loader': False, 'log_wandb': True, 'wandb_project': None, 'wandb_tags': [], 'wandb_resume_id': '', 'prefetcher': True, 'distributed': True, 'world_size': 4, 'rank': 0} -2025-01-15 02:34:19,771 INFO MainThread:196 [wandb_init.py:init():745] starting backend -2025-01-15 02:34:20,017 INFO MainThread:196 [wandb_init.py:init():749] sending inform_init request -2025-01-15 02:34:20,038 INFO MainThread:196 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn -2025-01-15 02:34:20,038 INFO MainThread:196 [wandb_init.py:init():764] backend started and connected -2025-01-15 02:34:20,042 INFO MainThread:196 [wandb_init.py:init():857] updated telemetry -2025-01-15 02:34:20,064 INFO MainThread:196 [wandb_init.py:init():889] communicating run to backend with 90.0 second timeout -2025-01-15 02:34:20,281 INFO MainThread:196 [wandb_init.py:init():941] starting run threads in backend -2025-01-15 02:34:20,408 INFO MainThread:196 [wandb_run.py:_console_start():2420] atexit reg -2025-01-15 02:34:20,408 INFO MainThread:196 [wandb_run.py:_redirect():2270] redirect: wrap_raw -2025-01-15 02:34:20,409 INFO MainThread:196 [wandb_run.py:_redirect():2335] Wrapping output streams. -2025-01-15 02:34:20,409 INFO MainThread:196 [wandb_run.py:_redirect():2360] Redirects installed. 
-2025-01-15 02:34:20,412 INFO MainThread:196 [wandb_init.py:init():983] run started, returning control to user process +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Current SDK version is 0.19.3 +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Configure stats pid to 139 +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Loading settings from /home/user/.config/wandb/settings +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Loading settings from /app/pytorch-image-models/wandb/settings +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Loading settings from environment variables +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:_log_setup():598] Logging user logs to /app/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug.log +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:_log_setup():599] Logging internal logs to /app/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-internal.log +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:init():714] calling init triggers +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:init():719] wandb.init called with sweep_config: {} +config: {'data': None, 'data_dir': None, 'dataset': 'hfds/datacomp/imagenet-1k-random-80.0-frac-1over64', 'train_split': 'train', 'val_split': 'validation', 'train_num_samples': None, 'val_num_samples': None, 'dataset_download': False, 'class_map': '', 'input_img_mode': None, 'input_key': None, 'target_key': None, 'dataset_trust_remote_code': False, 'model': 'seresnet34', 'pretrained': False, 'pretrained_path': None, 'initial_checkpoint': '', 'resume': '', 'no_resume_opt': False, 'num_classes': 1000, 'gp': None, 'img_size': None, 'in_chans': None, 'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '', 'batch_size': 256, 'validation_batch_size': None, 'channels_last': False, 'fuser': '', 'grad_accum_steps': 1, 'grad_checkpointing': False, 'fast_norm': False, 'model_kwargs': {}, 'head_init_scale': None, 'head_init_bias': None, 'torchcompile_mode': None, 'torchscript': False, 'torchcompile': None, 'device': 'cuda:0', 'amp': True, 'amp_dtype': 'float16', 'amp_impl': 'native', 'model_dtype': None, 'no_ddp_bb': False, 'synchronize_step': False, 'local_rank': 0, 'device_modules': None, 'opt': 'sgd', 'opt_eps': None, 'opt_betas': None, 'momentum': 0.9, 'weight_decay': 2e-05, 'clip_grad': None, 'clip_mode': 'norm', 'layer_decay': None, 'opt_kwargs': {}, 'sched': 'cosine', 'sched_on_updates': False, 'lr': 0.4, 'lr_base': 0.1, 'lr_base_size': 256, 'lr_base_scale': '', 'lr_noise': None, 'lr_noise_pct': 0.67, 'lr_noise_std': 1.0, 'lr_cycle_mul': 1.0, 'lr_cycle_decay': 0.5, 'lr_cycle_limit': 1, 'lr_k_decay': 1.0, 'warmup_lr': 1e-05, 'min_lr': 0, 'epochs': 150, 'epoch_repeats': 0.0, 'start_epoch': None, 'decay_milestones': [90, 180, 270], 'decay_epochs': 90, 'warmup_epochs': 5, 'warmup_prefix': False, 'cooldown_epochs': 0, 'patience_epochs': 10, 'decay_rate': 0.1, 'no_aug': False, 'train_crop_mode': None, 'scale': [0.08, 1.0], 'ratio': [0.75, 1.3333333333333333], 'hflip': 0.5, 'vflip': 0.0, 'color_jitter': 0.4, 'color_jitter_prob': None, 'grayscale_prob': None, 'gaussian_blur_prob': None, 'aa': None, 'aug_repeats': 0, 'aug_splits': 0, 'jsd_loss': False, 'bce_loss': False, 'bce_sum': False, 'bce_target_thresh': None, 'bce_pos_weight': None, 'reprob': 0.5, 'remode': 'pixel', 'recount': 1, 'resplit': False, 'mixup': 0.0, 
'cutmix': 0.0, 'cutmix_minmax': None, 'mixup_prob': 1.0, 'mixup_switch_prob': 0.5, 'mixup_mode': 'batch', 'mixup_off_epoch': 0, 'smoothing': 0.1, 'train_interpolation': 'random', 'drop': 0.0, 'drop_connect': None, 'drop_path': None, 'drop_block': None, 'bn_momentum': None, 'bn_eps': None, 'sync_bn': False, 'dist_bn': 'reduce', 'split_bn': False, 'model_ema': False, 'model_ema_force_cpu': False, 'model_ema_decay': 0.9998, 'model_ema_warmup': False, 'seed': 42, 'worker_seeding': 'all', 'log_interval': 50, 'recovery_interval': 0, 'checkpoint_hist': 10, 'workers': 4, 'save_images': False, 'pin_mem': False, 'no_prefetcher': False, 'output': '', 'experiment': 'ImageNetTraining80.0-frac-1over64', 'eval_metric': 'top1', 'tta': 0, 'use_multi_epochs_loader': False, 'log_wandb': True, 'wandb_project': 'ImageNetTraining80.0-frac-1over64', 'wandb_tags': [], 'wandb_resume_id': '', 'prefetcher': True, 'distributed': True, 'world_size': 4, 'rank': 0} +2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:init():745] starting backend +2025-01-19 00:29:40,753 INFO MainThread:139 [wandb_init.py:init():749] sending inform_init request +2025-01-19 00:29:40,774 INFO MainThread:139 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2025-01-19 00:29:40,774 INFO MainThread:139 [wandb_init.py:init():764] backend started and connected +2025-01-19 00:29:40,779 INFO MainThread:139 [wandb_init.py:init():857] updated telemetry +2025-01-19 00:29:40,803 INFO MainThread:139 [wandb_init.py:init():889] communicating run to backend with 90.0 second timeout +2025-01-19 00:29:41,014 INFO MainThread:139 [wandb_init.py:init():941] starting run threads in backend +2025-01-19 00:29:41,102 INFO MainThread:139 [wandb_run.py:_console_start():2420] atexit reg +2025-01-19 00:29:41,102 INFO MainThread:139 [wandb_run.py:_redirect():2270] redirect: wrap_raw +2025-01-19 00:29:41,103 INFO MainThread:139 [wandb_run.py:_redirect():2335] Wrapping output streams. +2025-01-19 00:29:41,103 INFO MainThread:139 [wandb_run.py:_redirect():2360] Redirects installed. 
+2025-01-19 00:29:41,105 INFO MainThread:139 [wandb_init.py:init():983] run started, returning control to user process +2025-01-19 01:32:13,860 WARNING MsgRouterThr:139 [router.py:message_loop():75] message_loop has been closed diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/config.yaml b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5f70ce6b3532afd5117e4ffdd5e843ceb029d3fe --- /dev/null +++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/config.yaml @@ -0,0 +1,338 @@ +_wandb: + value: + cli_version: 0.19.3 + m: [] + python_version: 3.9.21 + t: + "1": + - 1 + - 41 + - 49 + - 51 + - 55 + - 63 + - 105 + "2": + - 1 + - 41 + - 49 + - 51 + - 55 + - 63 + - 105 + "3": + - 13 + - 14 + - 15 + - 16 + - 23 + - 55 + "4": 3.9.21 + "5": 0.19.3 + "8": + - 5 + "12": 0.19.3 + "13": linux-x86_64 +aa: + value: null +amp: + value: true +amp_dtype: + value: float16 +amp_impl: + value: native +aug_repeats: + value: 0 +aug_splits: + value: 0 +batch_size: + value: 256 +bce_loss: + value: false +bce_pos_weight: + value: null +bce_sum: + value: false +bce_target_thresh: + value: null +bn_eps: + value: null +bn_momentum: + value: null +channels_last: + value: false +checkpoint_hist: + value: 10 +class_map: + value: "" +clip_grad: + value: null +clip_mode: + value: norm +color_jitter: + value: 0.4 +color_jitter_prob: + value: null +cooldown_epochs: + value: 0 +crop_pct: + value: null +cutmix: + value: 0 +cutmix_minmax: + value: null +data: + value: null +data_dir: + value: null +dataset: + value: hfds/datacomp/imagenet-1k-random-80.0-frac-1over64 +dataset_download: + value: false +dataset_trust_remote_code: + value: false +decay_epochs: + value: 90 +decay_milestones: + value: + - 90 + - 180 + - 270 +decay_rate: + value: 0.1 +device: + value: cuda:0 +device_modules: + value: null +dist_bn: + value: reduce +distributed: + value: true +drop: + value: 0 +drop_block: + value: null +drop_connect: + value: null +drop_path: + value: null +epoch_repeats: + value: 0 +epochs: + value: 150 +eval_metric: + value: top1 +experiment: + value: ImageNetTraining80.0-frac-1over64 +fast_norm: + value: false +fuser: + value: "" +gaussian_blur_prob: + value: null +gp: + value: null +grad_accum_steps: + value: 1 +grad_checkpointing: + value: false +grayscale_prob: + value: null +head_init_bias: + value: null +head_init_scale: + value: null +hflip: + value: 0.5 +img_size: + value: null +in_chans: + value: null +initial_checkpoint: + value: "" +input_img_mode: + value: null +input_key: + value: null +input_size: + value: null +interpolation: + value: "" +jsd_loss: + value: false +layer_decay: + value: null +local_rank: + value: 0 +log_interval: + value: 50 +log_wandb: + value: true +lr: + value: 0.4 +lr_base: + value: 0.1 +lr_base_scale: + value: "" +lr_base_size: + value: 256 +lr_cycle_decay: + value: 0.5 +lr_cycle_limit: + value: 1 +lr_cycle_mul: + value: 1 +lr_k_decay: + value: 1 +lr_noise: + value: null +lr_noise_pct: + value: 0.67 +lr_noise_std: + value: 1 +mean: + value: null +min_lr: + value: 0 +mixup: + value: 0 +mixup_mode: + value: batch +mixup_off_epoch: + value: 0 +mixup_prob: + value: 1 +mixup_switch_prob: + value: 0.5 +model: + value: seresnet34 +model_dtype: + value: null +model_ema: + value: false +model_ema_decay: + value: 0.9998 +model_ema_force_cpu: + value: false +model_ema_warmup: + value: false +momentum: + value: 0.9 +no_aug: + value: false +no_ddp_bb: + value: false +no_prefetcher: 
+ value: false +no_resume_opt: + value: false +num_classes: + value: 1000 +opt: + value: sgd +opt_betas: + value: null +opt_eps: + value: null +output: + value: "" +patience_epochs: + value: 10 +pin_mem: + value: false +prefetcher: + value: true +pretrained: + value: false +pretrained_path: + value: null +rank: + value: 0 +ratio: + value: + - 0.75 + - 1.3333333333333333 +recount: + value: 1 +recovery_interval: + value: 0 +remode: + value: pixel +reprob: + value: 0.5 +resplit: + value: false +resume: + value: "" +save_images: + value: false +scale: + value: + - 0.08 + - 1 +sched: + value: cosine +sched_on_updates: + value: false +seed: + value: 42 +smoothing: + value: 0.1 +split_bn: + value: false +start_epoch: + value: null +std: + value: null +sync_bn: + value: false +synchronize_step: + value: false +target_key: + value: null +torchcompile: + value: null +torchcompile_mode: + value: null +torchscript: + value: false +train_crop_mode: + value: null +train_interpolation: + value: random +train_num_samples: + value: null +train_split: + value: train +tta: + value: 0 +use_multi_epochs_loader: + value: false +val_num_samples: + value: null +val_split: + value: validation +validation_batch_size: + value: null +vflip: + value: 0 +wandb_project: + value: ImageNetTraining80.0-frac-1over64 +wandb_resume_id: + value: "" +wandb_tags: + value: [] +warmup_epochs: + value: 5 +warmup_lr: + value: 1e-05 +warmup_prefix: + value: false +weight_decay: + value: 2e-05 +worker_seeding: + value: all +workers: + value: 4 +world_size: + value: 4 diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/output.log b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..3eca59402ed0fa0d25e0fec67ac76a206f89d9b4 --- /dev/null +++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/output.log @@ -0,0 +1,1342 @@ +Scheduled epochs: 150 (epochs + cooldown_epochs). Warmup within epochs when warmup_prefix=False. LR stepped per epoch. 
+Train: 0 [ 0/19 ( 5%)] Loss: 6.97 (6.97) Time: 4.227s, 242.25/s (4.227s, 242.25/s) LR: 1.000e-05 Data: 1.662 (1.662) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.682 (1.682) Loss: 6.925 ( 6.925) Acc@1: 0.098 ( 0.098) Acc@5: 0.391 ( 0.391) +Test: [ 48/48] Time: 0.653 (0.344) Loss: 6.923 ( 6.920) Acc@1: 0.000 ( 0.104) Acc@5: 0.472 ( 0.586) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 1 [ 0/19 ( 5%)] Loss: 6.94 (6.94) Time: 1.712s, 598.27/s (1.712s, 598.27/s) LR: 8.001e-02 Data: 1.359 (1.359) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.941 ( 6.941) Acc@1: 0.098 ( 0.098) Acc@5: 0.684 ( 0.684) +Test: [ 48/48] Time: 0.086 (0.323) Loss: 6.924 ( 6.936) Acc@1: 0.472 ( 0.118) Acc@5: 0.825 ( 0.498) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 2 [ 0/19 ( 5%)] Loss: 6.89 (6.89) Time: 1.650s, 620.60/s (1.650s, 620.60/s) LR: 1.600e-01 Data: 1.289 (1.289) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.397 (1.397) Loss: 6.956 ( 6.956) Acc@1: 0.098 ( 0.098) Acc@5: 0.488 ( 0.488) +Test: [ 48/48] Time: 0.086 (0.324) Loss: 6.947 ( 6.953) Acc@1: 0.118 ( 0.150) Acc@5: 0.354 ( 0.642) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 3 [ 0/19 ( 5%)] Loss: 6.89 (6.89) Time: 2.005s, 510.74/s (2.005s, 510.74/s) LR: 2.400e-01 Data: 1.084 (1.084) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.390 (1.390) Loss: 6.933 ( 6.933) Acc@1: 0.098 ( 0.098) Acc@5: 0.684 ( 0.684) +Test: [ 48/48] Time: 0.086 (0.322) Loss: 6.911 ( 6.920) Acc@1: 0.118 ( 0.222) Acc@5: 1.533 ( 0.950) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 4 [ 0/19 ( 5%)] Loss: 6.87 (6.87) Time: 1.668s, 613.79/s (1.668s, 613.79/s) LR: 3.200e-01 Data: 1.306 (1.306) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.907 ( 6.907) Acc@1: 0.391 ( 0.391) Acc@5: 0.879 ( 0.879) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 6.888 ( 6.904) Acc@1: 0.354 ( 0.286) Acc@5: 1.415 ( 1.104) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 5 [ 0/19 ( 5%)] Loss: 6.86 (6.86) Time: 1.512s, 677.10/s (1.512s, 677.10/s) LR: 3.989e-01 Data: 1.149 (1.149) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 6.918 ( 6.918) Acc@1: 
0.293 ( 0.293) Acc@5: 1.074 ( 1.074) +Test: [ 48/48] Time: 0.086 (0.324) Loss: 6.882 ( 6.895) Acc@1: 0.825 ( 0.416) Acc@5: 1.769 ( 1.366) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 6 [ 0/19 ( 5%)] Loss: 6.84 (6.84) Time: 1.544s, 663.15/s (1.544s, 663.15/s) LR: 3.984e-01 Data: 1.179 (1.179) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.400 (1.400) Loss: 6.909 ( 6.909) Acc@1: 0.000 ( 0.000) Acc@5: 0.977 ( 0.977) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 6.872 ( 6.881) Acc@1: 0.472 ( 0.318) Acc@5: 1.179 ( 1.330) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 7 [ 0/19 ( 5%)] Loss: 6.82 (6.82) Time: 1.468s, 697.44/s (1.468s, 697.44/s) LR: 3.979e-01 Data: 1.103 (1.103) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 6.895 ( 6.895) Acc@1: 0.195 ( 0.195) Acc@5: 0.879 ( 0.879) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 6.850 ( 6.872) Acc@1: 0.590 ( 0.400) Acc@5: 1.533 ( 1.480) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 8 [ 0/19 ( 5%)] Loss: 6.79 (6.79) Time: 1.788s, 572.71/s (1.788s, 572.71/s) LR: 3.972e-01 Data: 1.421 (1.421) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 6.885 ( 6.885) Acc@1: 0.391 ( 0.391) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 6.846 ( 6.865) Acc@1: 0.708 ( 0.502) Acc@5: 2.358 ( 1.672) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 9 [ 0/19 ( 5%)] Loss: 6.78 (6.78) Time: 1.538s, 665.90/s (1.538s, 665.90/s) LR: 3.965e-01 Data: 1.137 (1.137) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.455 (1.455) Loss: 6.903 ( 6.903) Acc@1: 0.293 ( 0.293) Acc@5: 0.977 ( 0.977) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 6.855 ( 6.871) Acc@1: 0.708 ( 0.502) Acc@5: 2.358 ( 1.610) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-0.pth.tar', 0.104) + +Train: 10 [ 0/19 ( 5%)] Loss: 6.77 (6.77) Time: 1.803s, 568.00/s (1.803s, 568.00/s) LR: 3.956e-01 Data: 1.210 (1.210) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 6.895 ( 6.895) Acc@1: 0.391 ( 0.391) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.087 (0.326) Loss: 6.861 ( 6.867) Acc@1: 0.708 ( 0.456) Acc@5: 2.476 ( 1.732) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-1.pth.tar', 0.1179999998664856) + +Train: 11 [ 0/19 ( 5%)] Loss: 6.75 (6.75) Time: 1.635s, 626.35/s (1.635s, 626.35/s) LR: 3.947e-01 Data: 1.190 (1.190) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.405 (1.405) Loss: 
6.900 ( 6.900) Acc@1: 0.488 ( 0.488) Acc@5: 1.367 ( 1.367) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 6.869 ( 6.874) Acc@1: 0.590 ( 0.492) Acc@5: 1.769 ( 1.612) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-2.pth.tar', 0.1499999999666214) + +Train: 12 [ 0/19 ( 5%)] Loss: 6.73 (6.73) Time: 1.863s, 549.78/s (1.863s, 549.78/s) LR: 3.937e-01 Data: 1.496 (1.496) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.399 (1.399) Loss: 6.896 ( 6.896) Acc@1: 0.391 ( 0.391) Acc@5: 1.172 ( 1.172) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 6.852 ( 6.868) Acc@1: 0.825 ( 0.478) Acc@5: 2.123 ( 1.854) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-3.pth.tar', 0.2219999999666214) + +Train: 13 [ 0/19 ( 5%)] Loss: 6.69 (6.69) Time: 1.597s, 641.33/s (1.597s, 641.33/s) LR: 3.926e-01 Data: 1.228 (1.228) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.420 (1.420) Loss: 6.894 ( 6.894) Acc@1: 0.586 ( 0.586) Acc@5: 1.270 ( 1.270) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 6.854 ( 6.867) Acc@1: 0.590 ( 0.594) Acc@5: 2.005 ( 1.848) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 
0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-4.pth.tar', 0.28600000015258786) + +Train: 14 [ 0/19 ( 5%)] Loss: 6.70 (6.70) Time: 1.717s, 596.31/s (1.717s, 596.31/s) LR: 3.915e-01 Data: 1.349 (1.349) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.395 (1.395) Loss: 6.899 ( 6.899) Acc@1: 0.391 ( 0.391) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 6.868 ( 6.876) Acc@1: 0.472 ( 0.482) Acc@5: 1.887 ( 1.744) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-14.pth.tar', 0.4819999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-6.pth.tar', 0.3179999998664856) + +Train: 15 [ 0/19 ( 5%)] Loss: 6.70 (6.70) Time: 1.956s, 523.51/s (1.956s, 523.51/s) LR: 3.902e-01 Data: 1.283 (1.283) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 6.908 ( 6.908) Acc@1: 0.293 ( 0.293) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 6.869 ( 6.880) Acc@1: 0.825 ( 0.576) Acc@5: 2.241 ( 1.956) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-14.pth.tar', 0.4819999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-7.pth.tar', 0.3999999995803833) + +Train: 16 [ 0/19 ( 5%)] Loss: 6.69 (6.69) Time: 1.558s, 657.32/s (1.558s, 657.32/s) LR: 3.889e-01 Data: 1.189 (1.189) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.413 (1.413) Loss: 6.912 ( 6.912) Acc@1: 0.195 ( 0.195) Acc@5: 0.977 ( 0.977) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 6.876 ( 6.885) Acc@1: 0.825 ( 0.556) Acc@5: 2.476 ( 1.802) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-14.pth.tar', 0.4819999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-5.pth.tar', 0.4160000000190735) + +Train: 17 [ 0/19 ( 5%)] Loss: 6.67 (6.67) Time: 1.826s, 560.70/s (1.826s, 560.70/s) LR: 3.875e-01 Data: 1.110 (1.110) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.407 (1.407) Loss: 6.916 ( 6.916) Acc@1: 0.684 ( 0.684) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 6.882 ( 6.889) Acc@1: 0.590 ( 0.638) Acc@5: 2.123 ( 1.950) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-14.pth.tar', 0.4819999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-10.pth.tar', 0.45599999929428103) + +Train: 18 [ 0/19 ( 5%)] Loss: 6.61 (6.61) Time: 1.680s, 609.44/s (1.680s, 609.44/s) LR: 3.860e-01 Data: 1.312 (1.312) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.444 (1.444) Loss: 6.907 ( 6.907) Acc@1: 0.195 ( 0.195) Acc@5: 1.367 ( 1.367) +Test: [ 48/48] Time: 0.087 (0.327) Loss: 6.881 ( 6.882) Acc@1: 0.354 ( 0.562) Acc@5: 1.887 ( 1.914) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-14.pth.tar', 0.4819999998664856) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-12.pth.tar', 0.4780000000190735) + +Train: 19 [ 0/19 ( 5%)] Loss: 6.61 (6.61) Time: 1.634s, 626.71/s (1.634s, 626.71/s) LR: 3.844e-01 Data: 1.264 (1.264) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.453 (1.453) Loss: 6.915 ( 6.915) Acc@1: 0.293 ( 0.293) Acc@5: 1.270 ( 1.270) +Test: [ 48/48] Time: 0.088 (0.328) Loss: 6.875 ( 6.889) Acc@1: 0.472 ( 0.550) Acc@5: 2.594 ( 1.812) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-19.pth.tar', 0.5499999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-14.pth.tar', 0.4819999998664856) + +Train: 20 [ 0/19 ( 5%)] Loss: 6.63 (6.63) Time: 1.705s, 600.61/s (1.705s, 600.61/s) LR: 3.827e-01 Data: 1.334 (1.334) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.934 ( 6.934) Acc@1: 0.488 ( 0.488) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 6.931 ( 6.926) Acc@1: 0.236 ( 0.456) Acc@5: 1.769 ( 1.620) +Train: 21 [ 0/19 ( 5%)] Loss: 6.60 (6.60) Time: 1.553s, 659.41/s (1.553s, 659.41/s) LR: 3.810e-01 Data: 1.182 (1.182) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 6.926 ( 6.926) Acc@1: 0.488 ( 0.488) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 6.911 ( 6.911) Acc@1: 0.825 ( 0.538) Acc@5: 2.123 ( 1.776) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-19.pth.tar', 0.5499999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-21.pth.tar', 0.5380000000190734) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-11.pth.tar', 0.4919999995803833) + +Train: 22 [ 0/19 ( 5%)] Loss: 6.57 (6.57) Time: 1.736s, 589.78/s (1.736s, 589.78/s) LR: 3.791e-01 Data: 1.130 (1.130) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.403 (1.403) Loss: 6.921 ( 6.921) Acc@1: 0.195 ( 0.195) Acc@5: 0.879 ( 0.879) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 6.906 ( 6.907) Acc@1: 0.472 ( 0.566) Acc@5: 2.241 ( 1.968) +Current 
checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-19.pth.tar', 0.5499999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-21.pth.tar', 0.5380000000190734) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-9.pth.tar', 0.501999999294281) + +Train: 23 [ 0/19 ( 5%)] Loss: 6.56 (6.56) Time: 1.851s, 553.31/s (1.851s, 553.31/s) LR: 3.772e-01 Data: 1.287 (1.287) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 6.960 ( 6.960) Acc@1: 0.293 ( 0.293) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 6.961 ( 6.957) Acc@1: 0.472 ( 0.432) Acc@5: 1.887 ( 1.666) +Train: 24 [ 0/19 ( 5%)] Loss: 6.58 (6.58) Time: 1.503s, 681.39/s (1.503s, 681.39/s) LR: 3.753e-01 Data: 1.130 (1.130) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.407 (1.407) Loss: 6.941 ( 6.941) Acc@1: 0.586 ( 0.586) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 6.954 ( 6.950) Acc@1: 0.590 ( 0.470) Acc@5: 2.123 ( 1.782) +Train: 25 [ 0/19 ( 5%)] Loss: 6.54 (6.54) Time: 1.714s, 597.41/s (1.714s, 597.41/s) LR: 3.732e-01 Data: 1.343 (1.343) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 6.937 ( 6.937) Acc@1: 0.391 ( 0.391) Acc@5: 1.367 ( 1.367) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 6.922 ( 6.939) Acc@1: 0.825 ( 0.492) Acc@5: 2.123 ( 1.794) +Train: 26 [ 0/19 ( 5%)] Loss: 6.54 (6.54) Time: 1.825s, 561.23/s (1.825s, 561.23/s) LR: 3.711e-01 Data: 1.454 (1.454) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 6.929 ( 6.929) Acc@1: 0.195 ( 0.195) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 6.945 ( 6.935) Acc@1: 0.472 ( 0.554) Acc@5: 2.241 ( 1.926) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-26.pth.tar', 0.5539999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-19.pth.tar', 0.5499999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-21.pth.tar', 0.5380000000190734) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-8.pth.tar', 0.501999999294281) + +Train: 27 [ 0/19 ( 5%)] Loss: 6.52 (6.52) Time: 1.769s, 578.85/s (1.769s, 578.85/s) LR: 3.689e-01 Data: 1.398 
(1.398) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 6.942 ( 6.942) Acc@1: 0.293 ( 0.293) Acc@5: 1.270 ( 1.270) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 6.926 ( 6.937) Acc@1: 0.590 ( 0.596) Acc@5: 2.123 ( 1.964) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-26.pth.tar', 0.5539999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-19.pth.tar', 0.5499999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-21.pth.tar', 0.5380000000190734) + +Train: 28 [ 0/19 ( 5%)] Loss: 6.52 (6.52) Time: 1.540s, 664.85/s (1.540s, 664.85/s) LR: 3.666e-01 Data: 1.169 (1.169) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 6.939 ( 6.939) Acc@1: 0.391 ( 0.391) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 6.946 ( 6.946) Acc@1: 0.472 ( 0.580) Acc@5: 2.241 ( 1.938) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-26.pth.tar', 0.5539999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-19.pth.tar', 0.5499999998664856) + +Train: 29 [ 0/19 ( 5%)] Loss: 6.51 (6.51) Time: 1.857s, 551.54/s (1.857s, 551.54/s) LR: 3.642e-01 Data: 1.485 (1.485) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 6.969 ( 6.969) Acc@1: 0.391 ( 0.391) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 6.957 ( 6.967) Acc@1: 0.354 ( 0.516) Acc@5: 1.887 ( 1.776) +Train: 30 [ 0/19 ( 5%)] Loss: 6.50 (6.50) Time: 1.518s, 674.75/s (1.518s, 674.75/s) LR: 3.618e-01 Data: 1.147 (1.147) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 6.967 ( 6.967) Acc@1: 0.293 ( 0.293) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 6.988 ( 6.995) Acc@1: 0.590 ( 0.526) Acc@5: 1.533 ( 1.802) +Train: 31 [ 0/19 ( 5%)] Loss: 6.48 (6.48) Time: 1.826s, 560.64/s (1.826s, 560.64/s) LR: 3.593e-01 Data: 1.454 (1.454) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 
6.957 ( 6.957) Acc@1: 0.293 ( 0.293) Acc@5: 1.172 ( 1.172) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 6.968 ( 6.982) Acc@1: 0.590 ( 0.558) Acc@5: 2.123 ( 1.906) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-31.pth.tar', 0.5579999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-26.pth.tar', 0.5539999998664856) + +Train: 32 [ 0/19 ( 5%)] Loss: 6.44 (6.44) Time: 1.624s, 630.48/s (1.624s, 630.48/s) LR: 3.567e-01 Data: 1.161 (1.161) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.980 ( 6.980) Acc@1: 0.293 ( 0.293) Acc@5: 1.074 ( 1.074) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 6.983 ( 6.982) Acc@1: 0.708 ( 0.478) Acc@5: 2.123 ( 1.766) +Train: 33 [ 0/19 ( 5%)] Loss: 6.42 (6.42) Time: 1.567s, 653.43/s (1.567s, 653.43/s) LR: 3.541e-01 Data: 1.152 (1.152) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 6.999 ( 6.999) Acc@1: 0.391 ( 0.391) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 6.989 ( 7.015) Acc@1: 0.708 ( 0.514) Acc@5: 2.123 ( 1.810) +Train: 34 [ 0/19 ( 5%)] Loss: 6.42 (6.42) Time: 1.743s, 587.38/s (1.743s, 587.38/s) LR: 3.514e-01 Data: 1.261 (1.261) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.398 (1.398) Loss: 6.975 ( 6.975) Acc@1: 0.391 ( 0.391) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.010 ( 6.999) Acc@1: 0.236 ( 0.520) Acc@5: 1.415 ( 1.770) +Train: 35 [ 0/19 ( 5%)] Loss: 6.39 (6.39) Time: 1.602s, 639.32/s (1.602s, 639.32/s) LR: 3.486e-01 Data: 1.230 (1.230) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 7.011 ( 7.011) Acc@1: 0.488 ( 0.488) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 7.024 ( 7.035) Acc@1: 0.590 ( 0.546) Acc@5: 2.358 ( 1.862) +Train: 36 [ 0/19 ( 5%)] Loss: 6.41 (6.41) Time: 1.486s, 689.15/s (1.486s, 689.15/s) LR: 3.458e-01 Data: 1.070 (1.070) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.988 ( 6.988) Acc@1: 0.488 ( 0.488) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 6.998 ( 7.013) Acc@1: 0.590 ( 0.536) Acc@5: 1.769 ( 1.912) +Train: 37 [ 0/19 ( 5%)] Loss: 6.39 (6.39) Time: 1.702s, 601.71/s (1.702s, 601.71/s) LR: 3.429e-01 Data: 1.331 (1.331) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.404 (1.404) Loss: 7.035 ( 7.035) Acc@1: 0.391 ( 0.391) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 7.016 ( 7.048) Acc@1: 0.708 ( 0.502) Acc@5: 1.297 ( 1.796) +Train: 38 [ 0/19 ( 5%)] Loss: 6.34 (6.34) Time: 1.877s, 545.58/s (1.877s, 545.58/s) LR: 3.399e-01 Data: 1.505 (1.505) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.405 
(1.405) Loss: 7.033 ( 7.033) Acc@1: 0.000 ( 0.000) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 7.040 ( 7.045) Acc@1: 0.354 ( 0.486) Acc@5: 1.651 ( 1.882) +Train: 39 [ 0/19 ( 5%)] Loss: 6.39 (6.39) Time: 1.667s, 614.41/s (1.667s, 614.41/s) LR: 3.369e-01 Data: 1.295 (1.295) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 7.018 ( 7.018) Acc@1: 0.684 ( 0.684) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 7.028 ( 7.047) Acc@1: 0.943 ( 0.568) Acc@5: 2.830 ( 2.030) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-39.pth.tar', 0.5679999987220764) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-31.pth.tar', 0.5579999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-16.pth.tar', 0.5560000000190735) + +Train: 40 [ 0/19 ( 5%)] Loss: 6.32 (6.32) Time: 1.809s, 566.10/s (1.809s, 566.10/s) LR: 3.338e-01 Data: 1.437 (1.437) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.404 (1.404) Loss: 7.041 ( 7.041) Acc@1: 0.391 ( 0.391) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 7.053 ( 7.057) Acc@1: 0.825 ( 0.554) Acc@5: 2.005 ( 1.912) +Train: 41 [ 0/19 ( 5%)] Loss: 6.26 (6.26) Time: 1.667s, 614.22/s (1.667s, 614.22/s) LR: 3.307e-01 Data: 1.110 (1.110) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.408 (1.408) Loss: 7.062 ( 7.062) Acc@1: 0.293 ( 0.293) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.087 (0.328) Loss: 7.045 ( 7.093) Acc@1: 0.472 ( 0.466) Acc@5: 2.241 ( 1.810) +Train: 42 [ 0/19 ( 5%)] Loss: 6.26 (6.26) Time: 1.718s, 595.93/s (1.718s, 595.93/s) LR: 3.275e-01 Data: 1.345 (1.345) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.419 (1.419) Loss: 7.044 ( 7.044) Acc@1: 0.586 ( 0.586) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.035 ( 7.079) Acc@1: 0.590 ( 0.544) Acc@5: 2.476 ( 1.882) +Train: 43 [ 0/19 ( 5%)] Loss: 6.26 (6.26) Time: 1.537s, 666.22/s (1.537s, 666.22/s) LR: 3.242e-01 Data: 1.163 (1.163) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 7.176 ( 7.176) Acc@1: 0.781 ( 0.781) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.144 ( 7.181) Acc@1: 0.708 ( 0.482) Acc@5: 2.123 ( 1.776) +Train: 44 [ 0/19 ( 5%)] Loss: 6.25 (6.25) Time: 1.799s, 569.30/s (1.799s, 569.30/s) LR: 3.209e-01 Data: 1.427 (1.427) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.394 (1.394) Loss: 7.082 ( 7.082) Acc@1: 0.684 ( 0.684) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 7.099 ( 7.116) Acc@1: 0.472 ( 0.510) Acc@5: 1.769 ( 1.798) +Train: 45 [ 0/19 ( 5%)] Loss: 6.25 (6.25) Time: 1.718s, 596.06/s (1.718s, 596.06/s) LR: 3.176e-01 Data: 1.167 (1.167) +Distributing BatchNorm running means and vars +Test: [ 0/48] 
Time: 1.442 (1.442) Loss: 7.132 ( 7.132) Acc@1: 0.684 ( 0.684) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 7.097 ( 7.128) Acc@1: 0.236 ( 0.468) Acc@5: 1.769 ( 1.842) +Train: 46 [ 0/19 ( 5%)] Loss: 6.24 (6.24) Time: 1.629s, 628.52/s (1.629s, 628.52/s) LR: 3.141e-01 Data: 1.258 (1.258) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.427 (1.427) Loss: 7.121 ( 7.121) Acc@1: 0.586 ( 0.586) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 7.116 ( 7.143) Acc@1: 0.825 ( 0.578) Acc@5: 1.415 ( 1.772) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-39.pth.tar', 0.5679999987220764) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-31.pth.tar', 0.5579999995803833) + +Train: 47 [ 0/19 ( 5%)] Loss: 6.17 (6.17) Time: 1.665s, 614.84/s (1.665s, 614.84/s) LR: 3.107e-01 Data: 1.294 (1.294) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.435 (1.435) Loss: 7.129 ( 7.129) Acc@1: 0.391 ( 0.391) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.103 ( 7.136) Acc@1: 0.590 ( 0.450) Acc@5: 2.476 ( 1.852) +Train: 48 [ 0/19 ( 5%)] Loss: 6.14 (6.14) Time: 1.480s, 691.73/s (1.480s, 691.73/s) LR: 3.072e-01 Data: 1.108 (1.108) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 7.115 ( 7.115) Acc@1: 0.488 ( 0.488) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.115 ( 7.147) Acc@1: 0.354 ( 0.522) Acc@5: 1.651 ( 1.922) +Train: 49 [ 0/19 ( 5%)] Loss: 6.13 (6.13) Time: 1.563s, 655.30/s (1.563s, 655.30/s) LR: 3.036e-01 Data: 1.151 (1.151) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 7.144 ( 7.144) Acc@1: 0.781 ( 0.781) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.088 (0.321) Loss: 7.144 ( 7.170) Acc@1: 0.708 ( 0.558) Acc@5: 1.769 ( 1.946) +Train: 50 [ 0/19 ( 5%)] Loss: 6.14 (6.14) Time: 1.775s, 576.90/s (1.775s, 576.90/s) LR: 3.000e-01 Data: 1.162 (1.162) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 7.228 ( 7.228) Acc@1: 0.586 ( 0.586) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.213 ( 7.233) Acc@1: 0.354 ( 0.442) Acc@5: 2.005 ( 1.762) +Train: 51 [ 0/19 ( 5%)] Loss: 6.18 (6.18) Time: 1.668s, 614.00/s (1.668s, 614.00/s) LR: 2.964e-01 Data: 1.297 (1.297) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.427 (1.427) Loss: 7.181 ( 7.181) Acc@1: 0.391 ( 0.391) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.157 ( 7.209) Acc@1: 0.354 ( 0.548) Acc@5: 1.769 ( 1.982) +Train: 52 [ 0/19 ( 5%)] Loss: 6.13 (6.13) Time: 1.566s, 654.06/s (1.566s, 654.06/s) LR: 2.927e-01 Data: 1.158 (1.158) +Distributing BatchNorm running means and vars +Test: 
[ 0/48] Time: 1.450 (1.450) Loss: 7.193 ( 7.193) Acc@1: 0.586 ( 0.586) Acc@5: 2.148 ( 2.148) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 7.149 ( 7.213) Acc@1: 0.354 ( 0.530) Acc@5: 1.415 ( 1.858) +Train: 53 [ 0/19 ( 5%)] Loss: 6.09 (6.09) Time: 1.483s, 690.38/s (1.483s, 690.38/s) LR: 2.889e-01 Data: 1.112 (1.112) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 7.285 ( 7.285) Acc@1: 0.391 ( 0.391) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 7.228 ( 7.295) Acc@1: 0.236 ( 0.498) Acc@5: 1.533 ( 1.880) +Train: 54 [ 0/19 ( 5%)] Loss: 6.03 (6.03) Time: 1.692s, 605.13/s (1.692s, 605.13/s) LR: 2.852e-01 Data: 1.320 (1.320) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.433 (1.433) Loss: 7.274 ( 7.274) Acc@1: 0.293 ( 0.293) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.087 (0.326) Loss: 7.187 ( 7.273) Acc@1: 0.590 ( 0.512) Acc@5: 2.358 ( 1.894) +Train: 55 [ 0/19 ( 5%)] Loss: 6.07 (6.07) Time: 1.685s, 607.67/s (1.685s, 607.67/s) LR: 2.813e-01 Data: 1.312 (1.312) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.407 (1.407) Loss: 7.319 ( 7.319) Acc@1: 0.391 ( 0.391) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.251 ( 7.322) Acc@1: 0.472 ( 0.494) Acc@5: 2.241 ( 1.862) +Train: 56 [ 0/19 ( 5%)] Loss: 6.04 (6.04) Time: 1.914s, 534.87/s (1.914s, 534.87/s) LR: 2.775e-01 Data: 1.232 (1.232) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 7.284 ( 7.284) Acc@1: 0.391 ( 0.391) Acc@5: 2.148 ( 2.148) +Test: [ 48/48] Time: 0.088 (0.326) Loss: 7.213 ( 7.294) Acc@1: 0.472 ( 0.514) Acc@5: 2.005 ( 1.894) +Train: 57 [ 0/19 ( 5%)] Loss: 5.97 (5.97) Time: 1.512s, 677.39/s (1.512s, 677.39/s) LR: 2.736e-01 Data: 1.137 (1.137) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 7.252 ( 7.252) Acc@1: 0.977 ( 0.977) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.273 ( 7.290) Acc@1: 0.354 ( 0.510) Acc@5: 1.651 ( 1.986) +Train: 58 [ 0/19 ( 5%)] Loss: 5.97 (5.97) Time: 1.728s, 592.66/s (1.728s, 592.66/s) LR: 2.697e-01 Data: 1.354 (1.354) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.400 (1.400) Loss: 7.399 ( 7.399) Acc@1: 0.488 ( 0.488) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 7.303 ( 7.345) Acc@1: 0.354 ( 0.512) Acc@5: 1.533 ( 1.892) +Train: 59 [ 0/19 ( 5%)] Loss: 5.95 (5.95) Time: 1.580s, 647.96/s (1.580s, 647.96/s) LR: 2.658e-01 Data: 1.208 (1.208) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 7.320 ( 7.320) Acc@1: 0.391 ( 0.391) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.255 ( 7.333) Acc@1: 0.354 ( 0.490) Acc@5: 1.887 ( 2.018) +Train: 60 [ 0/19 ( 5%)] Loss: 5.92 (5.92) Time: 1.695s, 604.10/s (1.695s, 604.10/s) LR: 2.618e-01 Data: 1.322 (1.322) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.466 (1.466) Loss: 7.361 ( 7.361) Acc@1: 0.391 ( 0.391) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 7.355 ( 7.411) Acc@1: 0.590 ( 0.518) Acc@5: 2.358 ( 1.938) +Train: 61 [ 0/19 ( 5%)] Loss: 5.92 (5.92) Time: 1.519s, 674.02/s (1.519s, 674.02/s) LR: 2.578e-01 Data: 1.148 (1.148) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 7.299 ( 7.299) Acc@1: 0.781 ( 0.781) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.261 ( 7.297) Acc@1: 0.708 ( 0.534) Acc@5: 1.769 ( 1.934) +Train: 62 [ 0/19 ( 5%)] Loss: 5.86 (5.86) Time: 1.609s, 
636.34/s (1.609s, 636.34/s) LR: 2.538e-01 Data: 1.238 (1.238) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 7.321 ( 7.321) Acc@1: 0.391 ( 0.391) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.300 ( 7.346) Acc@1: 0.590 ( 0.508) Acc@5: 3.184 ( 1.978) +Train: 63 [ 0/19 ( 5%)] Loss: 5.84 (5.84) Time: 1.474s, 694.56/s (1.474s, 694.56/s) LR: 2.497e-01 Data: 1.070 (1.070) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.416 (1.416) Loss: 7.380 ( 7.380) Acc@1: 0.293 ( 0.293) Acc@5: 2.734 ( 2.734) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 7.342 ( 7.394) Acc@1: 0.354 ( 0.560) Acc@5: 1.651 ( 2.074) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-39.pth.tar', 0.5679999987220764) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-63.pth.tar', 0.5600000001525879) + +Train: 64 [ 0/19 ( 5%)] Loss: 5.78 (5.78) Time: 1.555s, 658.47/s (1.555s, 658.47/s) LR: 2.457e-01 Data: 1.183 (1.183) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.428 (1.428) Loss: 7.445 ( 7.445) Acc@1: 0.391 ( 0.391) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.367 ( 7.446) Acc@1: 0.590 ( 0.534) Acc@5: 2.358 ( 2.074) +Train: 65 [ 0/19 ( 5%)] Loss: 5.72 (5.72) Time: 1.895s, 540.33/s (1.895s, 540.33/s) LR: 2.416e-01 Data: 1.524 (1.524) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 7.479 ( 7.479) Acc@1: 0.488 ( 0.488) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.447 ( 7.477) Acc@1: 0.236 ( 0.478) Acc@5: 2.123 ( 1.828) +Train: 66 [ 0/19 ( 5%)] Loss: 5.78 (5.78) Time: 1.606s, 637.73/s (1.606s, 637.73/s) LR: 2.375e-01 Data: 1.143 (1.143) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 7.560 ( 7.560) Acc@1: 0.488 ( 0.488) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 7.512 ( 7.556) Acc@1: 0.825 ( 0.568) Acc@5: 1.769 ( 1.956) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-66.pth.tar', 0.5680000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-39.pth.tar', 
0.5679999987220764) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-18.pth.tar', 0.5620000001525879) + +Train: 67 [ 0/19 ( 5%)] Loss: 5.71 (5.71) Time: 1.680s, 609.43/s (1.680s, 609.43/s) LR: 2.334e-01 Data: 1.310 (1.310) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 7.507 ( 7.507) Acc@1: 0.488 ( 0.488) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.457 ( 7.519) Acc@1: 0.354 ( 0.520) Acc@5: 1.651 ( 1.990) +Train: 68 [ 0/19 ( 5%)] Loss: 5.69 (5.69) Time: 1.735s, 590.24/s (1.735s, 590.24/s) LR: 2.292e-01 Data: 1.363 (1.363) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.406 (1.406) Loss: 7.523 ( 7.523) Acc@1: 0.684 ( 0.684) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.447 ( 7.536) Acc@1: 0.825 ( 0.534) Acc@5: 1.887 ( 1.938) +Train: 69 [ 0/19 ( 5%)] Loss: 5.70 (5.70) Time: 1.580s, 648.28/s (1.580s, 648.28/s) LR: 2.251e-01 Data: 1.207 (1.207) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.413 (1.413) Loss: 7.492 ( 7.492) Acc@1: 0.684 ( 0.684) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.480 ( 7.519) Acc@1: 0.236 ( 0.500) Acc@5: 2.005 ( 1.998) +Train: 70 [ 0/19 ( 5%)] Loss: 5.61 (5.61) Time: 1.512s, 677.29/s (1.512s, 677.29/s) LR: 2.209e-01 Data: 1.141 (1.141) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.406 (1.406) Loss: 7.514 ( 7.514) Acc@1: 0.977 ( 0.977) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.470 ( 7.536) Acc@1: 0.825 ( 0.580) Acc@5: 2.005 ( 1.954) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-66.pth.tar', 0.5680000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-39.pth.tar', 0.5679999987220764) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-22.pth.tar', 0.5659999998664856) + +Train: 71 [ 0/19 ( 5%)] Loss: 5.61 (5.61) Time: 1.426s, 717.89/s (1.426s, 717.89/s) LR: 2.167e-01 Data: 1.052 (1.052) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 7.610 ( 7.610) Acc@1: 0.684 ( 0.684) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.573 ( 7.640) Acc@1: 0.118 ( 0.470) Acc@5: 1.297 ( 1.874) +Train: 72 [ 0/19 ( 5%)] Loss: 5.58 (5.58) Time: 1.571s, 651.96/s (1.571s, 651.96/s) LR: 2.126e-01 Data: 1.078 (1.078) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.419 (1.419) Loss: 7.543 ( 7.543) Acc@1: 0.488 ( 0.488) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 7.516 ( 7.560) Acc@1: 0.354 ( 0.504) Acc@5: 1.533 ( 1.930) +Train: 73 [ 0/19 ( 5%)] Loss: 5.53 (5.53) Time: 1.810s, 565.78/s (1.810s, 565.78/s) LR: 2.084e-01 Data: 1.437 (1.437) +Distributing BatchNorm 
running means and vars +Test: [ 0/48] Time: 1.484 (1.484) Loss: 7.682 ( 7.682) Acc@1: 0.293 ( 0.293) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.644 ( 7.687) Acc@1: 0.708 ( 0.552) Acc@5: 1.769 ( 2.048) +Train: 74 [ 0/19 ( 5%)] Loss: 5.57 (5.57) Time: 1.555s, 658.48/s (1.555s, 658.48/s) LR: 2.042e-01 Data: 1.183 (1.183) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 7.602 ( 7.602) Acc@1: 0.586 ( 0.586) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.087 (0.326) Loss: 7.620 ( 7.666) Acc@1: 0.236 ( 0.532) Acc@5: 1.651 ( 1.998) +Train: 75 [ 0/19 ( 5%)] Loss: 5.44 (5.44) Time: 1.510s, 678.34/s (1.510s, 678.34/s) LR: 2.000e-01 Data: 1.126 (1.126) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.409 (1.409) Loss: 7.727 ( 7.727) Acc@1: 0.488 ( 0.488) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 7.669 ( 7.751) Acc@1: 0.825 ( 0.466) Acc@5: 1.415 ( 1.888) +Train: 76 [ 0/19 ( 5%)] Loss: 5.39 (5.39) Time: 1.725s, 593.48/s (1.725s, 593.48/s) LR: 1.958e-01 Data: 1.243 (1.243) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.410 (1.410) Loss: 7.679 ( 7.679) Acc@1: 0.488 ( 0.488) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.641 ( 7.715) Acc@1: 0.708 ( 0.494) Acc@5: 1.769 ( 1.884) +Train: 77 [ 0/19 ( 5%)] Loss: 5.45 (5.45) Time: 1.705s, 600.54/s (1.705s, 600.54/s) LR: 1.916e-01 Data: 1.333 (1.333) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.433 (1.433) Loss: 7.842 ( 7.842) Acc@1: 0.293 ( 0.293) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 7.799 ( 7.850) Acc@1: 0.472 ( 0.548) Acc@5: 1.887 ( 1.990) +Train: 78 [ 0/19 ( 5%)] Loss: 5.41 (5.41) Time: 1.791s, 571.89/s (1.791s, 571.89/s) LR: 1.874e-01 Data: 1.418 (1.418) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.416 (1.416) Loss: 7.778 ( 7.778) Acc@1: 0.488 ( 0.488) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.742 ( 7.813) Acc@1: 0.472 ( 0.546) Acc@5: 2.476 ( 1.968) +Train: 79 [ 0/19 ( 5%)] Loss: 5.34 (5.34) Time: 1.581s, 647.75/s (1.581s, 647.75/s) LR: 1.833e-01 Data: 1.208 (1.208) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.407 (1.407) Loss: 7.795 ( 7.795) Acc@1: 0.293 ( 0.293) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.793 ( 7.820) Acc@1: 0.708 ( 0.558) Acc@5: 2.476 ( 1.990) +Train: 80 [ 0/19 ( 5%)] Loss: 5.27 (5.27) Time: 1.626s, 629.93/s (1.626s, 629.93/s) LR: 1.791e-01 Data: 1.134 (1.134) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.427 (1.427) Loss: 7.782 ( 7.782) Acc@1: 0.684 ( 0.684) Acc@5: 2.832 ( 2.832) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 7.760 ( 7.837) Acc@1: 0.943 ( 0.584) Acc@5: 2.005 ( 1.954) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-66.pth.tar', 0.5680000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-39.pth.tar', 0.5679999987220764) + +Train: 81 [ 0/19 ( 5%)] Loss: 5.21 (5.21) Time: 1.605s, 638.09/s (1.605s, 638.09/s) LR: 1.749e-01 Data: 1.233 (1.233) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.406 (1.406) Loss: 7.891 ( 7.891) Acc@1: 0.684 ( 0.684) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 7.819 ( 7.912) Acc@1: 0.943 ( 0.572) Acc@5: 2.123 ( 2.026) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-81.pth.tar', 0.5719999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-66.pth.tar', 0.5680000000190735) + +Train: 82 [ 0/19 ( 5%)] Loss: 5.24 (5.24) Time: 1.707s, 599.90/s (1.707s, 599.90/s) LR: 1.708e-01 Data: 1.188 (1.188) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 7.857 ( 7.857) Acc@1: 0.781 ( 0.781) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 7.767 ( 7.862) Acc@1: 1.179 ( 0.594) Acc@5: 2.005 ( 2.046) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-81.pth.tar', 0.5719999997329712) + +Train: 83 [ 0/19 ( 5%)] Loss: 5.16 (5.16) Time: 1.705s, 600.66/s (1.705s, 600.66/s) LR: 1.666e-01 Data: 1.164 (1.164) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 7.890 ( 7.890) Acc@1: 0.488 ( 0.488) Acc@5: 2.148 ( 2.148) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.872 ( 7.928) Acc@1: 0.590 ( 0.570) Acc@5: 2.241 ( 1.996) +Train: 84 [ 0/19 ( 5%)] Loss: 5.13 (5.13) Time: 1.656s, 618.48/s (1.656s, 618.48/s) LR: 1.625e-01 Data: 1.095 (1.095) +Distributing BatchNorm running means and vars +Test: [ 
0/48] Time: 1.412 (1.412) Loss: 8.006 ( 8.006) Acc@1: 0.684 ( 0.684) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 7.938 ( 8.003) Acc@1: 0.472 ( 0.576) Acc@5: 2.594 ( 2.058) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-84.pth.tar', 0.5759999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-15.pth.tar', 0.5759999990081787) + +Train: 85 [ 0/19 ( 5%)] Loss: 5.06 (5.06) Time: 1.563s, 655.31/s (1.563s, 655.31/s) LR: 1.584e-01 Data: 1.039 (1.039) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 8.010 ( 8.010) Acc@1: 0.586 ( 0.586) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 7.978 ( 8.031) Acc@1: 0.708 ( 0.630) Acc@5: 1.651 ( 2.050) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-84.pth.tar', 0.5759999998664856) + +Train: 86 [ 0/19 ( 5%)] Loss: 5.06 (5.06) Time: 1.554s, 658.78/s (1.554s, 658.78/s) LR: 1.543e-01 Data: 1.114 (1.114) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.404 (1.404) Loss: 7.980 ( 7.980) Acc@1: 0.781 ( 0.781) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.321) Loss: 7.934 ( 7.990) Acc@1: 0.590 ( 0.606) Acc@5: 1.769 ( 2.188) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-46.pth.tar', 0.5780000000190735) + +Train: 87 [ 0/19 ( 5%)] Loss: 4.98 (4.98) Time: 1.543s, 663.60/s (1.543s, 663.60/s) LR: 1.503e-01 Data: 1.119 (1.119) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 8.257 ( 8.257) Acc@1: 0.488 ( 0.488) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.326) Loss: 8.166 ( 8.236) Acc@1: 0.472 ( 0.532) Acc@5: 2.005 ( 1.970) +Train: 88 [ 0/19 ( 5%)] Loss: 4.96 (4.96) Time: 1.804s, 567.67/s (1.804s, 567.67/s) LR: 1.462e-01 Data: 1.432 (1.432) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 8.061 ( 8.061) Acc@1: 0.488 ( 0.488) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 8.044 ( 8.093) Acc@1: 0.590 ( 0.512) Acc@5: 2.123 ( 2.050) +Train: 89 [ 0/19 ( 5%)] Loss: 4.94 (4.94) Time: 1.537s, 666.19/s (1.537s, 666.19/s) LR: 1.422e-01 Data: 1.166 (1.166) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.413 (1.413) Loss: 8.170 ( 8.170) Acc@1: 0.391 ( 0.391) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.084 ( 8.140) Acc@1: 0.825 ( 0.538) Acc@5: 2.358 ( 1.878) +Train: 90 [ 0/19 ( 5%)] Loss: 4.87 (4.87) Time: 1.643s, 623.16/s (1.643s, 623.16/s) LR: 1.382e-01 Data: 1.187 (1.187) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.398 (1.398) Loss: 8.155 ( 8.155) Acc@1: 0.586 ( 0.586) Acc@5: 1.465 ( 1.465) +Test: [ 48/48] Time: 0.088 (0.326) Loss: 8.077 ( 8.171) Acc@1: 0.825 ( 0.576) Acc@5: 1.769 ( 1.972) +Train: 91 [ 0/19 ( 5%)] Loss: 4.84 (4.84) Time: 1.691s, 605.65/s (1.691s, 605.65/s) LR: 1.342e-01 Data: 1.314 (1.314) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 8.300 ( 8.300) Acc@1: 0.391 ( 0.391) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 8.266 ( 8.276) Acc@1: 0.708 ( 0.532) Acc@5: 2.830 ( 2.082) +Train: 92 [ 0/19 ( 5%)] Loss: 4.80 (4.80) Time: 1.498s, 683.62/s (1.498s, 683.62/s) LR: 1.303e-01 Data: 0.993 (0.993) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 8.272 ( 8.272) Acc@1: 0.684 ( 0.684) Acc@5: 2.148 ( 2.148) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 8.198 ( 8.260) Acc@1: 0.472 ( 0.534) Acc@5: 1.651 ( 1.964) +Train: 93 [ 0/19 ( 5%)] Loss: 4.72 (4.72) Time: 1.682s, 608.83/s (1.682s, 608.83/s) LR: 1.264e-01 Data: 1.285 (1.285) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.446 (1.446) Loss: 8.271 ( 8.271) Acc@1: 0.781 ( 0.781) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.259 ( 8.280) Acc@1: 0.590 ( 0.608) Acc@5: 2.358 ( 2.016) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 
0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-70.pth.tar', 0.5799999990081787) + +Train: 94 [ 0/19 ( 5%)] Loss: 4.72 (4.72) Time: 1.667s, 614.26/s (1.667s, 614.26/s) LR: 1.225e-01 Data: 1.295 (1.295) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.413 (1.413) Loss: 8.339 ( 8.339) Acc@1: 0.488 ( 0.488) Acc@5: 1.562 ( 1.562) +Test: [ 48/48] Time: 0.087 (0.324) Loss: 8.275 ( 8.341) Acc@1: 0.472 ( 0.558) Acc@5: 2.594 ( 1.966) +Train: 95 [ 0/19 ( 5%)] Loss: 4.70 (4.70) Time: 1.769s, 578.78/s (1.769s, 578.78/s) LR: 1.187e-01 Data: 1.397 (1.397) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.446 (1.446) Loss: 8.348 ( 8.348) Acc@1: 0.586 ( 0.586) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.286 ( 8.391) Acc@1: 0.472 ( 0.508) Acc@5: 2.241 ( 1.978) +Train: 96 [ 0/19 ( 5%)] Loss: 4.56 (4.56) Time: 1.648s, 621.25/s (1.648s, 621.25/s) LR: 1.148e-01 Data: 1.277 (1.277) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 8.464 ( 8.464) Acc@1: 0.781 ( 0.781) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.416 ( 8.477) Acc@1: 0.354 ( 0.498) Acc@5: 2.241 ( 1.974) +Train: 97 [ 0/19 ( 5%)] Loss: 4.51 (4.51) Time: 1.655s, 618.62/s (1.655s, 618.62/s) LR: 1.111e-01 Data: 1.282 (1.282) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.462 (1.462) Loss: 8.398 ( 8.398) Acc@1: 0.977 ( 0.977) Acc@5: 2.832 ( 2.832) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 8.336 ( 8.395) Acc@1: 0.590 ( 0.576) Acc@5: 2.123 ( 2.090) +Train: 98 [ 0/19 ( 5%)] Loss: 4.48 (4.48) Time: 1.663s, 615.63/s (1.663s, 615.63/s) LR: 1.073e-01 Data: 1.291 (1.291) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 8.516 ( 8.516) Acc@1: 0.488 ( 0.488) Acc@5: 3.223 ( 3.223) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.513 ( 8.534) Acc@1: 0.708 ( 0.504) Acc@5: 2.241 ( 1.926) +Train: 99 [ 0/19 ( 5%)] Loss: 4.47 (4.47) Time: 1.617s, 633.35/s (1.617s, 633.35/s) LR: 1.036e-01 Data: 1.245 (1.245) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 8.494 ( 8.494) Acc@1: 0.586 ( 0.586) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.442 ( 8.511) Acc@1: 0.236 ( 0.566) Acc@5: 2.123 ( 2.012) +Train: 100 [ 0/19 ( 5%)] Loss: 4.33 (4.33) Time: 1.732s, 591.35/s (1.732s, 591.35/s) LR: 1.000e-01 Data: 1.360 (1.360) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 8.507 ( 8.507) Acc@1: 0.781 ( 0.781) Acc@5: 2.734 ( 2.734) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.414 ( 8.506) Acc@1: 0.590 ( 0.560) Acc@5: 2.358 ( 2.016) +Train: 101 [ 0/19 ( 5%)] Loss: 4.24 (4.24) Time: 1.527s, 670.69/s (1.527s, 670.69/s) LR: 9.639e-02 Data: 1.155 (1.155) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 8.577 ( 8.577) Acc@1: 0.977 ( 0.977) Acc@5: 2.832 ( 2.832) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.503 ( 8.551) Acc@1: 0.825 ( 0.576) Acc@5: 2.594 ( 1.988) +Train: 102 [ 0/19 ( 5%)] Loss: 4.25 (4.25) Time: 1.914s, 535.14/s (1.914s, 535.14/s) LR: 9.283e-02 Data: 1.541 (1.541) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.457 (1.457) 
Loss: 8.676 ( 8.676) Acc@1: 0.977 ( 0.977) Acc@5: 2.734 ( 2.734) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 8.603 ( 8.662) Acc@1: 0.354 ( 0.576) Acc@5: 1.887 ( 2.150) +Train: 103 [ 0/19 ( 5%)] Loss: 4.18 (4.18) Time: 1.759s, 582.11/s (1.759s, 582.11/s) LR: 8.932e-02 Data: 1.387 (1.387) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.443 (1.443) Loss: 8.554 ( 8.554) Acc@1: 0.293 ( 0.293) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.328) Loss: 8.518 ( 8.550) Acc@1: 0.590 ( 0.538) Acc@5: 2.358 ( 2.048) +Train: 104 [ 0/19 ( 5%)] Loss: 4.15 (4.15) Time: 1.688s, 606.52/s (1.688s, 606.52/s) LR: 8.586e-02 Data: 1.179 (1.179) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 8.981 ( 8.981) Acc@1: 0.586 ( 0.586) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.087 (0.327) Loss: 8.911 ( 8.986) Acc@1: 0.118 ( 0.536) Acc@5: 2.123 ( 2.016) +Train: 105 [ 0/19 ( 5%)] Loss: 4.05 (4.05) Time: 1.653s, 619.45/s (1.653s, 619.45/s) LR: 8.244e-02 Data: 1.281 (1.281) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 8.768 ( 8.768) Acc@1: 0.488 ( 0.488) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.087 (0.326) Loss: 8.652 ( 8.762) Acc@1: 0.472 ( 0.574) Acc@5: 2.241 ( 1.998) +Train: 106 [ 0/19 ( 5%)] Loss: 3.96 (3.96) Time: 1.676s, 610.83/s (1.676s, 610.83/s) LR: 7.908e-02 Data: 1.201 (1.201) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 8.742 ( 8.742) Acc@1: 0.391 ( 0.391) Acc@5: 2.148 ( 2.148) +Test: [ 48/48] Time: 0.088 (0.327) Loss: 8.672 ( 8.746) Acc@1: 0.354 ( 0.558) Acc@5: 1.769 ( 1.918) +Train: 107 [ 0/19 ( 5%)] Loss: 4.07 (4.07) Time: 1.505s, 680.60/s (1.505s, 680.60/s) LR: 7.577e-02 Data: 1.133 (1.133) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.455 (1.455) Loss: 8.915 ( 8.915) Acc@1: 0.488 ( 0.488) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.863 ( 8.912) Acc@1: 0.708 ( 0.546) Acc@5: 2.123 ( 1.968) +Train: 108 [ 0/19 ( 5%)] Loss: 3.91 (3.91) Time: 1.650s, 620.69/s (1.650s, 620.69/s) LR: 7.252e-02 Data: 1.278 (1.278) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.432 (1.432) Loss: 8.867 ( 8.867) Acc@1: 0.488 ( 0.488) Acc@5: 1.758 ( 1.758) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 8.820 ( 8.876) Acc@1: 0.354 ( 0.608) Acc@5: 1.887 ( 2.000) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-28.pth.tar', 0.5799999998664856) + +Train: 109 [ 0/19 ( 5%)] Loss: 3.93 (3.93) Time: 1.682s, 608.84/s (1.682s, 608.84/s) LR: 6.932e-02 Data: 1.309 (1.309) +Distributing BatchNorm running means and vars +Test: [ 0/48] 
Time: 1.407 (1.407) Loss: 8.776 ( 8.776) Acc@1: 0.586 ( 0.586) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.699 ( 8.795) Acc@1: 0.708 ( 0.592) Acc@5: 2.476 ( 2.160) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-109.pth.tar', 0.5920000003051757) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-80.pth.tar', 0.5839999997329712) + +Train: 110 [ 0/19 ( 5%)] Loss: 3.78 (3.78) Time: 1.629s, 628.44/s (1.629s, 628.44/s) LR: 6.617e-02 Data: 1.216 (1.216) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 8.874 ( 8.874) Acc@1: 0.488 ( 0.488) Acc@5: 1.953 ( 1.953) +Test: [ 48/48] Time: 0.088 (0.321) Loss: 8.766 ( 8.872) Acc@1: 0.825 ( 0.526) Acc@5: 2.358 ( 2.006) +Train: 111 [ 0/19 ( 5%)] Loss: 3.73 (3.73) Time: 2.060s, 497.15/s (2.060s, 497.15/s) LR: 6.309e-02 Data: 1.685 (1.685) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 8.959 ( 8.959) Acc@1: 0.684 ( 0.684) Acc@5: 2.051 ( 2.051) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 8.922 ( 8.966) Acc@1: 0.354 ( 0.564) Acc@5: 1.651 ( 1.998) +Train: 112 [ 0/19 ( 5%)] Loss: 3.72 (3.72) Time: 1.580s, 648.27/s (1.580s, 648.27/s) LR: 6.007e-02 Data: 1.154 (1.154) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.427 (1.427) Loss: 8.833 ( 8.833) Acc@1: 0.781 ( 0.781) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.327) Loss: 8.716 ( 8.834) Acc@1: 0.472 ( 0.584) Acc@5: 2.358 ( 2.112) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-109.pth.tar', 0.5920000003051757) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-112.pth.tar', 0.5839999998664855) + +Train: 113 [ 0/19 ( 5%)] Loss: 3.73 (3.73) Time: 1.607s, 637.02/s (1.607s, 637.02/s) LR: 5.711e-02 Data: 1.222 (1.222) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 8.831 ( 8.831) Acc@1: 0.781 ( 0.781) Acc@5: 2.539 ( 2.539) 
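
The recurring elements in the training log above can be read as follows. The per-epoch learning rates (e.g. LR: 3.937e-01 at epoch 12, 3.000e-01 at epoch 50, 2.000e-01 at epoch 75, 1.000e-01 at epoch 100, 3.577e-02 at epoch 121) are consistent with a plain cosine decay from a peak learning rate of 0.4 over 150 epochs. The sketch below re-derives those values under that assumption; the peak LR and epoch count are inferred from the logged numbers, and this is an illustration, not the scheduler code used by the run.

    # Minimal sketch: cosine-annealed LR, assuming peak_lr=0.4 and 150 epochs
    # (values inferred from the log above; not the actual scheduler implementation).
    import math

    PEAK_LR = 0.4
    TOTAL_EPOCHS = 150

    def cosine_lr(epoch: int) -> float:
        """Cosine decay from PEAK_LR toward 0 over TOTAL_EPOCHS (no restarts)."""
        return 0.5 * PEAK_LR * (1.0 + math.cos(math.pi * epoch / TOTAL_EPOCHS))

    for epoch in (12, 50, 75, 100, 121):
        print(f"epoch {epoch:3d}: lr = {cosine_lr(epoch):.3e}")
    # epoch  12: lr = 3.937e-01
    # epoch  50: lr = 3.000e-01
    # epoch  75: lr = 2.000e-01
    # epoch 100: lr = 1.000e-01
    # epoch 121: lr = 3.577e-02

The "Current checkpoints:" blocks list at most ten checkpoint files, ordered best-first by the evaluation top-1 accuracy recorded with each file. A minimal sketch of that bookkeeping (illustrative only, not the checkpoint saver used by the run):

    # Keep the 10 best checkpoints, sorted by eval top-1 accuracy (descending).
    from typing import List, Tuple

    def update_top_checkpoints(
        checkpoints: List[Tuple[str, float]],  # (path, metric), best first
        path: str,
        metric: float,
        max_history: int = 10,
    ) -> List[Tuple[str, float]]:
        ranked = sorted(checkpoints + [(path, metric)],
                        key=lambda c: c[1], reverse=True)
        return ranked[:max_history]  # the worst entry is dropped once the list is full

Finally, the "Distributing BatchNorm running means and vars" line that precedes each evaluation indicates that the training workers synchronize their BatchNorm running statistics before testing. One common way to do this with torch.distributed is sketched below (an assumed illustration, not the project's code):

    # Sketch: synchronize BatchNorm running stats across distributed workers.
    import torch.distributed as dist
    import torch.nn as nn

    BN_TYPES = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)

    def distribute_bn_stats(model: nn.Module, reduce: bool = False) -> None:
        for m in model.modules():
            if isinstance(m, BN_TYPES):
                for buf in (m.running_mean, m.running_var):
                    if reduce:
                        dist.all_reduce(buf, op=dist.ReduceOp.SUM)  # sum over workers...
                        buf /= dist.get_world_size()                # ...then average
                    else:
                        dist.broadcast(buf, src=0)  # copy rank 0's stats to all workers
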
+Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.831 ( 8.948) Acc@1: 0.590 ( 0.614) Acc@5: 1.769 ( 2.108) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 0.5939999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-109.pth.tar', 0.5920000003051757) + +Train: 114 [ 0/19 ( 5%)] Loss: 3.62 (3.62) Time: 1.864s, 549.44/s (1.864s, 549.44/s) LR: 5.421e-02 Data: 1.489 (1.489) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 8.963 ( 8.963) Acc@1: 0.879 ( 0.879) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.326) Loss: 8.974 ( 9.018) Acc@1: 0.354 ( 0.566) Acc@5: 2.123 ( 2.044) +Train: 115 [ 0/19 ( 5%)] Loss: 3.56 (3.56) Time: 1.721s, 595.12/s (1.721s, 595.12/s) LR: 5.137e-02 Data: 1.346 (1.346) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 9.010 ( 9.010) Acc@1: 0.781 ( 0.781) Acc@5: 1.855 ( 1.855) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.983 ( 9.044) Acc@1: 0.708 ( 0.568) Acc@5: 2.005 ( 1.984) +Train: 116 [ 0/19 ( 5%)] Loss: 3.52 (3.52) Time: 1.638s, 625.20/s (1.638s, 625.20/s) LR: 4.860e-02 Data: 1.262 (1.262) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 9.005 ( 9.005) Acc@1: 0.391 ( 0.391) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.328) Loss: 8.901 ( 9.004) Acc@1: 0.590 ( 0.546) Acc@5: 2.123 ( 2.096) +Train: 117 [ 0/19 ( 5%)] Loss: 3.55 (3.55) Time: 1.544s, 663.08/s (1.544s, 663.08/s) LR: 4.590e-02 Data: 1.169 (1.169) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 8.993 ( 8.993) Acc@1: 0.781 ( 0.781) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.899 ( 8.978) Acc@1: 0.590 ( 0.648) Acc@5: 1.887 ( 2.060) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-13.pth.tar', 
0.5939999995803833) + +Train: 118 [ 0/19 ( 5%)] Loss: 3.39 (3.39) Time: 1.680s, 609.36/s (1.680s, 609.36/s) LR: 4.326e-02 Data: 1.305 (1.305) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 8.997 ( 8.997) Acc@1: 0.391 ( 0.391) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.903 ( 8.985) Acc@1: 0.236 ( 0.568) Acc@5: 2.005 ( 2.084) +Train: 119 [ 0/19 ( 5%)] Loss: 3.33 (3.33) Time: 1.711s, 598.32/s (1.711s, 598.32/s) LR: 4.069e-02 Data: 1.336 (1.336) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.405 (1.405) Loss: 9.051 ( 9.051) Acc@1: 0.977 ( 0.977) Acc@5: 3.027 ( 3.027) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 9.043 ( 9.099) Acc@1: 0.236 ( 0.616) Acc@5: 1.887 ( 2.174) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-82.pth.tar', 0.5940000011825561) + +Train: 120 [ 0/19 ( 5%)] Loss: 3.23 (3.23) Time: 1.632s, 627.61/s (1.632s, 627.61/s) LR: 3.820e-02 Data: 1.256 (1.256) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 9.024 ( 9.024) Acc@1: 0.488 ( 0.488) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.925 ( 9.024) Acc@1: 0.590 ( 0.634) Acc@5: 2.830 ( 2.032) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-27.pth.tar', 0.5959999995803833) + +Train: 121 [ 0/19 ( 5%)] Loss: 3.29 (3.29) Time: 1.969s, 520.15/s (1.969s, 520.15/s) LR: 3.577e-02 Data: 1.594 (1.594) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 8.965 ( 8.965) Acc@1: 0.586 ( 0.586) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.944 ( 9.004) Acc@1: 0.236 ( 0.570) Acc@5: 2.241 ( 2.108) +Train: 122 [ 0/19 ( 5%)] Loss: 3.29 (3.29) Time: 1.536s, 
666.82/s (1.536s, 666.82/s) LR: 3.342e-02 Data: 1.160 (1.160) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.408 (1.408) Loss: 9.078 ( 9.078) Acc@1: 0.586 ( 0.586) Acc@5: 1.660 ( 1.660) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 9.001 ( 9.073) Acc@1: 0.590 ( 0.570) Acc@5: 2.476 ( 2.110) +Train: 123 [ 0/19 ( 5%)] Loss: 3.23 (3.23) Time: 1.670s, 613.04/s (1.670s, 613.04/s) LR: 3.113e-02 Data: 1.295 (1.295) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.406 (1.406) Loss: 9.016 ( 9.016) Acc@1: 0.781 ( 0.781) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.938 ( 9.038) Acc@1: 0.472 ( 0.630) Acc@5: 2.005 ( 2.074) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-86.pth.tar', 0.6059999995803833) + +Train: 124 [ 0/19 ( 5%)] Loss: 3.21 (3.21) Time: 2.189s, 467.79/s (2.189s, 467.79/s) LR: 2.893e-02 Data: 1.814 (1.814) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.428 (1.428) Loss: 8.994 ( 8.994) Acc@1: 0.488 ( 0.488) Acc@5: 2.734 ( 2.734) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.939 ( 9.014) Acc@1: 0.590 ( 0.618) Acc@5: 2.476 ( 2.008) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-124.pth.tar', 0.6179999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-93.pth.tar', 0.6079999995803833) + +Train: 125 [ 0/19 ( 5%)] Loss: 3.23 (3.23) Time: 1.499s, 683.30/s (1.499s, 683.30/s) LR: 2.679e-02 Data: 1.081 (1.081) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 9.093 ( 9.093) Acc@1: 0.586 ( 0.586) Acc@5: 2.832 ( 2.832) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 9.026 ( 9.113) Acc@1: 0.236 ( 0.618) Acc@5: 2.005 ( 2.120) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-125.pth.tar', 0.6179999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-124.pth.tar', 0.6179999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-108.pth.tar', 0.6080000001525879) + +Train: 126 [ 0/19 ( 5%)] Loss: 3.07 (3.07) Time: 1.950s, 525.25/s (1.950s, 525.25/s) LR: 2.474e-02 Data: 1.257 (1.257) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 9.104 ( 9.104) Acc@1: 0.781 ( 0.781) Acc@5: 2.344 ( 2.344) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 9.000 ( 9.105) Acc@1: 0.236 ( 0.636) Acc@5: 2.594 ( 2.044) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-125.pth.tar', 0.6179999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-124.pth.tar', 0.6179999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-113.pth.tar', 0.6139999995803833) + +Train: 127 [ 0/19 ( 5%)] Loss: 3.17 (3.17) Time: 1.499s, 683.28/s (1.499s, 683.28/s) LR: 2.276e-02 Data: 1.122 (1.122) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.404 (1.404) Loss: 9.093 ( 9.093) Acc@1: 0.879 ( 0.879) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 9.013 ( 9.122) Acc@1: 0.472 ( 0.596) Acc@5: 2.830 ( 2.074) +Train: 128 [ 0/19 ( 5%)] Loss: 3.07 (3.07) Time: 1.583s, 646.74/s (1.583s, 646.74/s) LR: 2.086e-02 Data: 1.159 (1.159) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.399 (1.399) Loss: 9.011 ( 9.011) Acc@1: 0.977 ( 0.977) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.321) Loss: 8.905 ( 9.017) Acc@1: 0.472 ( 0.596) Acc@5: 2.594 ( 2.144) +Train: 129 [ 0/19 ( 5%)] Loss: 3.14 (3.14) Time: 1.836s, 557.62/s (1.836s, 557.62/s) LR: 1.903e-02 Data: 1.190 (1.190) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.425 (1.425) Loss: 9.116 ( 9.116) Acc@1: 0.586 ( 0.586) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.326) Loss: 8.949 ( 9.076) Acc@1: 0.236 ( 0.594) Acc@5: 2.241 ( 2.140) +Train: 130 [ 0/19 ( 5%)] Loss: 2.98 (2.98) Time: 1.533s, 667.91/s (1.533s, 667.91/s) LR: 1.729e-02 Data: 1.158 (1.158) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 
(1.414) Loss: 9.126 ( 9.126) Acc@1: 0.977 ( 0.977) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 9.007 ( 9.115) Acc@1: 0.472 ( 0.610) Acc@5: 2.948 ( 2.154) +Train: 131 [ 0/19 ( 5%)] Loss: 3.04 (3.04) Time: 1.687s, 606.93/s (1.687s, 606.93/s) LR: 1.563e-02 Data: 1.215 (1.215) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.459 (1.459) Loss: 9.022 ( 9.022) Acc@1: 0.977 ( 0.977) Acc@5: 2.930 ( 2.930) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 8.929 ( 9.044) Acc@1: 0.472 ( 0.588) Acc@5: 2.241 ( 2.084) +Train: 132 [ 0/19 ( 5%)] Loss: 3.06 (3.06) Time: 1.896s, 540.09/s (1.896s, 540.09/s) LR: 1.404e-02 Data: 1.521 (1.521) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 9.081 ( 9.081) Acc@1: 1.074 ( 1.074) Acc@5: 2.734 ( 2.734) +Test: [ 48/48] Time: 0.087 (0.323) Loss: 8.955 ( 9.065) Acc@1: 0.472 ( 0.612) Acc@5: 2.476 ( 2.126) +Train: 133 [ 0/19 ( 5%)] Loss: 2.95 (2.95) Time: 1.559s, 656.97/s (1.559s, 656.97/s) LR: 1.254e-02 Data: 1.157 (1.157) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.404 (1.404) Loss: 9.032 ( 9.032) Acc@1: 0.684 ( 0.684) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.924 ( 9.032) Acc@1: 0.590 ( 0.622) Acc@5: 2.594 ( 2.102) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-133.pth.tar', 0.6219999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-125.pth.tar', 0.6179999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-124.pth.tar', 0.6179999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-119.pth.tar', 0.6159999999332428) + +Train: 134 [ 0/19 ( 5%)] Loss: 3.04 (3.04) Time: 1.639s, 624.94/s (1.639s, 624.94/s) LR: 1.112e-02 Data: 1.264 (1.264) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.408 (1.408) Loss: 9.079 ( 9.079) Acc@1: 0.781 ( 0.781) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.980 ( 9.064) Acc@1: 0.472 ( 0.604) Acc@5: 2.712 ( 2.094) +Train: 135 [ 0/19 ( 5%)] Loss: 2.96 (2.96) Time: 1.612s, 635.11/s (1.612s, 635.11/s) LR: 9.789e-03 Data: 1.130 (1.130) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 9.111 ( 9.111) Acc@1: 0.781 ( 0.781) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 9.001 ( 9.095) Acc@1: 0.118 ( 0.640) Acc@5: 2.594 ( 2.054) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar', 0.6399999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-133.pth.tar', 0.6219999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-125.pth.tar', 0.6179999999332428) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-124.pth.tar', 0.6179999995803833) + +Train: 136 [ 0/19 ( 5%)] Loss: 2.92 (2.92) Time: 1.663s, 615.62/s (1.663s, 615.62/s) LR: 8.536e-03 Data: 1.290 (1.290) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.415 (1.415) Loss: 9.115 ( 9.115) Acc@1: 0.684 ( 0.684) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 9.005 ( 9.102) Acc@1: 0.472 ( 0.614) Acc@5: 2.241 ( 2.100) +Train: 137 [ 0/19 ( 5%)] Loss: 2.99 (2.99) Time: 1.566s, 653.89/s (1.566s, 653.89/s) LR: 7.367e-03 Data: 1.191 (1.191) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 9.076 ( 9.076) Acc@1: 0.781 ( 0.781) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.970 ( 9.065) Acc@1: 0.472 ( 0.592) Acc@5: 2.594 ( 2.128) +Train: 138 [ 0/19 ( 5%)] Loss: 2.83 (2.83) Time: 1.666s, 614.74/s (1.666s, 614.74/s) LR: 6.283e-03 Data: 1.293 (1.293) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 9.062 ( 9.062) Acc@1: 0.781 ( 0.781) Acc@5: 2.246 ( 2.246) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.970 ( 9.077) Acc@1: 0.354 ( 0.594) Acc@5: 2.241 ( 2.076) +Train: 139 [ 0/19 ( 5%)] Loss: 2.89 (2.89) Time: 1.627s, 629.45/s (1.627s, 629.45/s) LR: 5.284e-03 Data: 1.253 (1.253) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.420 (1.420) Loss: 9.098 ( 9.098) Acc@1: 0.781 ( 0.781) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.970 ( 9.086) Acc@1: 0.590 ( 0.634) Acc@5: 2.712 ( 2.144) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar', 0.6399999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-133.pth.tar', 0.6219999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-125.pth.tar', 0.6179999999332428) + +Train: 140 [ 0/19 ( 5%)] Loss: 2.84 (2.84) Time: 1.486s, 689.07/s (1.486s, 689.07/s) LR: 4.370e-03 Data: 1.111 (1.111) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.418 (1.418) Loss: 9.059 ( 9.059) Acc@1: 0.781 ( 0.781) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.972 ( 9.060) Acc@1: 0.590 ( 0.620) Acc@5: 2.476 ( 2.134) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar', 0.6399999999666214) + 
('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-133.pth.tar', 0.6219999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-140.pth.tar', 0.6199999995803833) + +Train: 141 [ 0/19 ( 5%)] Loss: 2.80 (2.80) Time: 1.861s, 550.21/s (1.861s, 550.21/s) LR: 3.543e-03 Data: 1.485 (1.485) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.396 (1.396) Loss: 9.098 ( 9.098) Acc@1: 0.684 ( 0.684) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 9.001 ( 9.092) Acc@1: 0.472 ( 0.646) Acc@5: 2.358 ( 2.110) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-141.pth.tar', 0.6459999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar', 0.6399999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-133.pth.tar', 0.6219999995803833) + +Train: 142 [ 0/19 ( 5%)] Loss: 2.84 (2.84) Time: 1.614s, 634.52/s (1.614s, 634.52/s) LR: 2.801e-03 Data: 1.086 (1.086) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 9.062 ( 9.062) Acc@1: 0.781 ( 0.781) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.968 ( 9.062) Acc@1: 0.354 ( 0.632) Acc@5: 2.712 ( 2.130) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-141.pth.tar', 0.6459999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar', 0.6399999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-142.pth.tar', 0.6320000001525878) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-123.pth.tar', 0.6299999998664856) + +Train: 143 [ 0/19 ( 5%)] Loss: 
2.79 (2.79) Time: 1.582s, 647.36/s (1.582s, 647.36/s) LR: 2.146e-03 Data: 1.208 (1.208) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 9.087 ( 9.087) Acc@1: 0.684 ( 0.684) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.087 (0.322) Loss: 8.970 ( 9.077) Acc@1: 0.236 ( 0.620) Acc@5: 2.358 ( 2.110) +Train: 144 [ 0/19 ( 5%)] Loss: 2.85 (2.85) Time: 1.760s, 581.96/s (1.760s, 581.96/s) LR: 1.577e-03 Data: 1.385 (1.385) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.403 (1.403) Loss: 9.086 ( 9.086) Acc@1: 0.781 ( 0.781) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.324) Loss: 8.964 ( 9.077) Acc@1: 0.354 ( 0.608) Acc@5: 2.712 ( 2.140) +Train: 145 [ 0/19 ( 5%)] Loss: 2.92 (2.92) Time: 1.577s, 649.26/s (1.577s, 649.26/s) LR: 1.096e-03 Data: 1.036 (1.036) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 9.089 ( 9.089) Acc@1: 0.684 ( 0.684) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.322) Loss: 8.970 ( 9.077) Acc@1: 0.472 ( 0.612) Acc@5: 2.358 ( 2.138) +Train: 146 [ 0/19 ( 5%)] Loss: 2.79 (2.79) Time: 1.641s, 624.03/s (1.641s, 624.03/s) LR: 7.014e-04 Data: 1.177 (1.177) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.399 (1.399) Loss: 9.067 ( 9.067) Acc@1: 0.586 ( 0.586) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.953 ( 9.058) Acc@1: 0.354 ( 0.618) Acc@5: 2.712 ( 2.136) +Train: 147 [ 0/19 ( 5%)] Loss: 2.85 (2.85) Time: 1.680s, 609.51/s (1.680s, 609.51/s) LR: 3.947e-04 Data: 1.306 (1.306) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.415 (1.415) Loss: 9.079 ( 9.079) Acc@1: 0.879 ( 0.879) Acc@5: 2.441 ( 2.441) +Test: [ 48/48] Time: 0.088 (0.323) Loss: 8.974 ( 9.074) Acc@1: 0.236 ( 0.630) Acc@5: 2.712 ( 2.124) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-117.pth.tar', 0.6479999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-141.pth.tar', 0.6459999998664856) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-135.pth.tar', 0.6399999999666214) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-17.pth.tar', 0.6379999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-126.pth.tar', 0.6359999999332427) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-120.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-139.pth.tar', 0.6339999995803833) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-142.pth.tar', 0.6320000001525878) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-85.pth.tar', 0.6300000003051758) + ('./output/train/ImageNetTraining80.0-frac-1over64/checkpoint-147.pth.tar', 0.6299999999332428) + +Train: 148 [ 0/19 ( 5%)] Loss: 2.80 (2.80) Time: 1.621s, 631.62/s (1.621s, 631.62/s) LR: 1.754e-04 Data: 1.246 (1.246) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.406 (1.406) Loss: 9.064 ( 9.064) Acc@1: 0.684 ( 0.684) Acc@5: 2.539 ( 2.539) +Test: [ 48/48] Time: 0.087 (0.325) Loss: 8.961 ( 9.061) Acc@1: 0.236 ( 0.624) Acc@5: 2.594 ( 2.138) +Train: 149 [ 0/19 ( 5%)] Loss: 2.80 (2.80) Time: 1.679s, 609.98/s (1.679s, 609.98/s) LR: 4.386e-05 Data: 1.302 (1.302) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 9.087 ( 9.087) Acc@1: 0.781 ( 0.781) Acc@5: 2.637 ( 2.637) +Test: [ 48/48] Time: 0.088 (0.325) Loss: 8.977 ( 9.075) Acc@1: 0.472 ( 0.620) Acc@5: 2.476 ( 2.124) +*** 
Best metric: 0.6479999995803833 (epoch 117)
+--result
+[
+    {
+        "epoch": 147,
+        "train": {
+            "loss": 2.825033664703369
+        },
+        "validation": {
+            "loss": 9.073507239990235,
+            "top1": 0.6299999999332428,
+            "top5": 2.123999998474121
+        }
+    },
+    {
+        "epoch": 85,
+        "train": {
+            "loss": 5.206167221069336
+        },
+        "validation": {
+            "loss": 8.030916482696533,
+            "top1": 0.6300000003051758,
+            "top5": 2.0500000020599365
+        }
+    },
+    {
+        "epoch": 142,
+        "train": {
+            "loss": 2.8495898246765137
+        },
+        "validation": {
+            "loss": 9.062279708251953,
+            "top1": 0.6320000001525878,
+            "top5": 2.1300000025177
+        }
+    },
+    {
+        "epoch": 120,
+        "train": {
+            "loss": 3.4388296604156494
+        },
+        "validation": {
+            "loss": 9.024455751953125,
+            "top1": 0.6339999995803833,
+            "top5": 2.032000001220703
+        }
+    },
+    {
+        "epoch": 139,
+        "train": {
+            "loss": 2.907723903656006
+        },
+        "validation": {
+            "loss": 9.085828477783203,
+            "top1": 0.6339999995803833,
+            "top5": 2.1440000025177004
+        }
+    },
+    {
+        "epoch": 126,
+        "train": {
+            "loss": 3.1890621185302734
+        },
+        "validation": {
+            "loss": 9.104899918823243,
+            "top1": 0.6359999999332427,
+            "top5": 2.0439999997711182
+        }
+    },
+    {
+        "epoch": 17,
+        "train": {
+            "loss": 6.704622268676758
+        },
+        "validation": {
+            "loss": 6.889008323059082,
+            "top1": 0.6379999995803833,
+            "top5": 1.9500000009155274
+        }
+    },
+    {
+        "epoch": 135,
+        "train": {
+            "loss": 2.961143970489502
+        },
+        "validation": {
+            "loss": 9.09548891571045,
+            "top1": 0.6399999999666214,
+            "top5": 2.053999999771118
+        }
+    },
+    {
+        "epoch": 141,
+        "train": {
+            "loss": 2.8683888912200928
+        },
+        "validation": {
+            "loss": 9.09216304321289,
+            "top1": 0.6459999998664856,
+            "top5": 2.109999998321533
+        }
+    },
+    {
+        "epoch": 117,
+        "train": {
+            "loss": 3.5988922119140625
+        },
+        "validation": {
+            "loss": 8.977717391967774,
+            "top1": 0.6479999995803833,
+            "top5": 2.0599999994659424
+        }
+    }
+]
diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/requirements.txt b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ad248760294040c96134c7d74e4270361d45f0a3
--- /dev/null
+++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/requirements.txt
@@ -0,0 +1,110 @@
+GitPython==3.1.44
+MarkupSafe==2.1.5
+PyYAML==6.0.2
+aiofiles==23.2.1
+aiohappyeyeballs==2.4.4
+aiohttp==3.11.11
+aiosignal==1.3.2
+annotated-types==0.7.0
+anyio==4.8.0
+async-timeout==5.0.1
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+click==8.1.8
+contourpy==1.3.0
+cycler==0.12.1
+datasets==3.2.0
+dill==0.3.8
+docker-pycreds==0.4.0
+eval_type_backport==0.2.2
+exceptiongroup==1.2.2
+fastapi==0.115.6
+ffmpy==0.5.0
+filelock==3.16.1
+fonttools==4.55.3
+frozenlist==1.5.0
+fsspec==2024.9.0
+gitdb==4.0.12
+gradio==4.44.1
+gradio_client==1.3.0
+h11==0.14.0
+httpcore==1.0.7
+httptools==0.6.4
+httpx==0.28.1
+huggingface-hub==0.27.1
+idna==3.10
+importlib_resources==6.5.2
+Jinja2==3.1.5
+kiwisolver==1.4.7
+markdown-it-py==3.0.0
+matplotlib==3.9.4
+mdurl==0.1.2
+multidict==6.1.0
+multiprocess==0.70.16
+orjson==3.10.14
+packaging==24.2
+pandas==2.2.3
+pillow==10.4.0
+platformdirs==4.3.6
+propcache==0.2.1
+protobuf==5.29.3
+psutil==6.1.1
+pyarrow==19.0.0
+pydantic==2.10.5
+pydantic_core==2.27.2
+pydub==0.25.1
+Pygments==2.19.1
+pyparsing==3.2.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+python-multipart==0.0.20
+pytz==2024.2
+requests==2.32.3
+rich==13.9.4
+ruff==0.9.2
+semantic-version==2.10.0
+sentry-sdk==2.20.0
+setproctitle==1.3.4
+shellingham==1.5.4
+six==1.17.0
+smmap==5.0.2
+sniffio==1.3.1
+starlette==0.41.3
+tomlkit==0.12.0
+tqdm==4.67.1
+typer==0.15.1
+typing_extensions==4.12.2
+tzdata==2024.2
+urllib3==2.3.0
+uvicorn==0.34.0
+uvloop==0.21.0
+wandb==0.19.3
+watchfiles==1.0.4
+websockets==12.0
+xxhash==3.5.0
+yarl==1.18.3
+zipp==3.21.0
+mpmath==1.3.0
+networkx==3.2.1
+numpy==1.26.4
+nvidia-cublas-cu12==12.4.5.8
+nvidia-cuda-cupti-cu12==12.4.127
+nvidia-cuda-nvrtc-cu12==12.4.127
+nvidia-cuda-runtime-cu12==12.4.127
+nvidia-cudnn-cu12==9.1.0.70
+nvidia-cufft-cu12==11.2.1.3
+nvidia-curand-cu12==10.3.5.147
+nvidia-cusolver-cu12==11.6.1.9
+nvidia-cusparse-cu12==12.3.1.170
+nvidia-nccl-cu12==2.21.5
+nvidia-nvjitlink-cu12==12.4.127
+nvidia-nvtx-cu12==12.4.127
+safetensors==0.5.2
+sympy==1.13.1
+torch==2.5.1
+torchvision==0.20.1
+triton==3.1.0
+pip==23.0.1
+setuptools==58.1.0
+wheel==0.45.1
diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/wandb-metadata.json b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/wandb-metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..128adfb217d51041e070e77d434613f6c4df2d36
--- /dev/null
+++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/wandb-metadata.json
@@ -0,0 +1,88 @@
+{
+    "os": "Linux-5.10.230-223.885.amzn2.x86_64-x86_64-with-glibc2.36",
+    "python": "CPython 3.9.21",
+    "startedAt": "2025-01-19T00:29:40.774778Z",
+    "args": [
+        "--dataset",
+        "hfds/datacomp/imagenet-1k-random-80.0-frac-1over64",
+        "--log-wandb",
+        "--wandb-project",
+        "ImageNetTraining80.0-frac-1over64",
+        "--experiment",
+        "ImageNetTraining80.0-frac-1over64",
+        "--model",
+        "seresnet34",
+        "--sched",
+        "cosine",
+        "--epochs",
+        "150",
+        "--warmup-epochs",
+        "5",
+        "--lr",
+        "0.4",
+        "--reprob",
+        "0.5",
+        "--remode",
+        "pixel",
+        "--batch-size",
+        "256",
+        "--amp",
+        "-j",
+        "4"
+    ],
+    "program": "/app/pytorch-image-models/train.py",
+    "codePath": "train.py",
+    "git": {
+        "remote": "https://github.com/huggingface/pytorch-image-models.git",
+        "commit": "c96e9e7ce03e34aa8812e4aa50463e46131793e5"
+    },
+    "email": "meg@huggingface.co",
+    "root": "/app/pytorch-image-models",
+    "host": "r-datacomp-imagenettraining80-0-frac-1over64-8picbj30-3d701-p9o",
+    "executable": "/usr/local/bin/python3.9",
+    "codePathLocal": "train.py",
+    "cpu_count": 24,
+    "cpu_count_logical": 48,
+    "gpu": "NVIDIA L4",
+    "gpu_count": 4,
+    "disk": {
+        "/": {
+            "total": "3757625933824",
+            "used": "82385797120"
+        }
+    },
+    "memory": {
+        "total": "195171028992"
+    },
+    "cpu": {
+        "count": 24,
+        "countLogical": 48
+    },
+    "gpu_nvidia": [
+        {
+            "name": "NVIDIA L4",
+            "memoryTotal": "24152899584",
+            "cudaCores": 7424,
+            "architecture": "Ada"
+        },
+        {
+            "name": "NVIDIA L4",
+            "memoryTotal": "24152899584",
+            "cudaCores": 7424,
+            "architecture": "Ada"
+        },
+        {
+            "name": "NVIDIA L4",
+            "memoryTotal": "24152899584",
+            "cudaCores": 7424,
+            "architecture": "Ada"
+        },
+        {
+            "name": "NVIDIA L4",
+            "memoryTotal": "24152899584",
+            "cudaCores": 7424,
+            "architecture": "Ada"
+        }
+    ],
+    "cudaVersion": "12.4"
+}
\ No newline at end of file
diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/wandb-summary.json b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/wandb-summary.json
new file mode 100644
index 0000000000000000000000000000000000000000..0d3360bb0badda95ccf83f760fbd68d6f1803d7f
--- /dev/null
+++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/files/wandb-summary.json
@@ -0,0 +1 @@
+{"eval_top5":2.124000001068115,"_runtime":3752.717087855,"epoch":149,"eval_loss":9.07491242126465,"_wandb":{"runtime":3753},"eval_top1":0.6199999998664856,"_timestamp":1.737250333443893e+09,"train_loss":2.8274147510528564,"_step":149,"lr":4.3863305030900085e-05}
\ No newline at end of file
diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-core.log b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-core.log
new file mode 100644
index 0000000000000000000000000000000000000000..b7f0a08711792ff45f04a6062a6dec137f12d08c
--- /dev/null
+++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-core.log
@@ -0,0 +1,13 @@
+{"time":"2025-01-19T00:29:40.562918487Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmpn81oueu9/port-139.txt","pid":139,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false}
+{"time":"2025-01-19T00:29:40.563816644Z","level":"INFO","msg":"Will exit if parent process dies.","ppid":139}
+{"time":"2025-01-19T00:29:40.563810455Z","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":34009,"Zone":""}}
+{"time":"2025-01-19T00:29:40.755261729Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T00:29:40.777003336Z","level":"INFO","msg":"handleInformInit: received","streamId":"7ozba6oj","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T00:29:40.88349001Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"7ozba6oj","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T01:32:13.860168557Z","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T01:32:13.860285899Z","level":"INFO","msg":"connection: Close: initiating connection closure","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T01:32:13.860301139Z","level":"INFO","msg":"server is shutting down"}
+{"time":"2025-01-19T01:32:13.860436482Z","level":"INFO","msg":"connection: Close: connection successfully closed","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T01:32:14.255473748Z","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T01:32:14.255510459Z","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"127.0.0.1:46684"}
+{"time":"2025-01-19T01:32:14.25553355Z","level":"INFO","msg":"server is closed"}
diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-internal.log b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-internal.log
new file mode 100644
index 0000000000000000000000000000000000000000..f686e2d233941a66aa3b12a8966fc028eb0cdccb
--- /dev/null
+++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-internal.log
@@ -0,0 +1,15 @@
+{"time":"2025-01-19T00:29:40.777529817Z","level":"INFO","msg":"stream: starting","core version":"0.19.3","symlink path":"/app/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-core.log"}
+{"time":"2025-01-19T00:29:40.883448579Z","level":"INFO","msg":"created new stream","id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883483601Z","level":"INFO","msg":"stream: started","id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883535802Z","level":"INFO","msg":"writer: Do: started","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883571511Z","level":"INFO","msg":"handler: started","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:40.883555421Z","level":"INFO","msg":"sender: started","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T00:29:41.01753464Z","level":"INFO","msg":"Starting system monitor"}
+{"time":"2025-01-19T01:32:13.86031313Z","level":"INFO","msg":"stream: closing","id":"7ozba6oj"}
+{"time":"2025-01-19T01:32:13.86035543Z","level":"INFO","msg":"Stopping system monitor"}
+{"time":"2025-01-19T01:32:13.861082206Z","level":"INFO","msg":"Stopped system monitor"}
+{"time":"2025-01-19T01:32:14.161833213Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+{"time":"2025-01-19T01:32:14.255169742Z","level":"INFO","msg":"handler: closed","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T01:32:14.255211143Z","level":"INFO","msg":"writer: Close: closed","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T01:32:14.255264754Z","level":"INFO","msg":"sender: closed","stream_id":"7ozba6oj"}
+{"time":"2025-01-19T01:32:14.255369426Z","level":"INFO","msg":"stream: closed","id":"7ozba6oj"}
diff --git a/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug.log b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug.log
new file mode 100644
index 0000000000000000000000000000000000000000..4a7469999d35702737cd0fa8f023882f9703efea
--- /dev/null
+++ b/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug.log
@@ -0,0 +1,23 @@
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Current SDK version is 0.19.3
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Configure stats pid to 139
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Loading settings from /home/user/.config/wandb/settings
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Loading settings from /app/pytorch-image-models/wandb/settings
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_setup.py:_flush():68] Loading settings from environment variables
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:_log_setup():598] Logging user logs to /app/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug.log
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:_log_setup():599] Logging internal logs to /app/pytorch-image-models/wandb/run-20250119_002940-7ozba6oj/logs/debug-internal.log
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:init():714] calling init triggers
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:init():719] wandb.init called with sweep_config: {}
+config: {'data': None, 'data_dir': None, 'dataset': 'hfds/datacomp/imagenet-1k-random-80.0-frac-1over64', 'train_split': 'train', 'val_split': 'validation', 'train_num_samples': None, 'val_num_samples': None, 'dataset_download': False, 'class_map': '', 'input_img_mode': None, 'input_key': None, 'target_key': None, 'dataset_trust_remote_code': False, 'model': 'seresnet34', 'pretrained': False, 'pretrained_path': None, 'initial_checkpoint': '', 'resume': '', 'no_resume_opt': False, 'num_classes': 1000, 'gp': None, 'img_size': None, 'in_chans': None, 'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '', 'batch_size': 256, 'validation_batch_size': None, 'channels_last': False, 'fuser': '', 'grad_accum_steps': 1, 'grad_checkpointing': False, 'fast_norm': False, 'model_kwargs': {}, 'head_init_scale': None, 'head_init_bias': None, 'torchcompile_mode': None, 'torchscript': False, 'torchcompile': None, 'device': 'cuda:0', 'amp': True, 'amp_dtype': 'float16', 'amp_impl': 'native', 'model_dtype': None, 'no_ddp_bb': False, 'synchronize_step': False, 'local_rank': 0, 'device_modules': None, 'opt': 'sgd', 'opt_eps': None, 'opt_betas': None, 'momentum': 0.9, 'weight_decay': 2e-05, 'clip_grad': None, 'clip_mode': 'norm', 'layer_decay': None, 'opt_kwargs': {}, 'sched': 'cosine', 'sched_on_updates': False, 'lr': 0.4, 'lr_base': 0.1, 'lr_base_size': 256, 'lr_base_scale': '', 'lr_noise': None, 'lr_noise_pct': 0.67, 'lr_noise_std': 1.0, 'lr_cycle_mul': 1.0, 'lr_cycle_decay': 0.5, 'lr_cycle_limit': 1, 'lr_k_decay': 1.0, 'warmup_lr': 1e-05, 'min_lr': 0, 'epochs': 150, 'epoch_repeats': 0.0, 'start_epoch': None, 'decay_milestones': [90, 180, 270], 'decay_epochs': 90, 'warmup_epochs': 5, 'warmup_prefix': False, 'cooldown_epochs': 0, 'patience_epochs': 10, 'decay_rate': 0.1, 'no_aug': False, 'train_crop_mode': None, 'scale': [0.08, 1.0], 'ratio': [0.75, 1.3333333333333333], 'hflip': 0.5, 'vflip': 0.0, 'color_jitter': 0.4, 'color_jitter_prob': None, 'grayscale_prob': None, 'gaussian_blur_prob': None, 'aa': None, 'aug_repeats': 0, 'aug_splits': 0, 'jsd_loss': False, 'bce_loss': False, 'bce_sum': False, 'bce_target_thresh': None, 'bce_pos_weight': None, 'reprob': 0.5, 'remode': 'pixel', 'recount': 1, 'resplit': False, 'mixup': 0.0, 'cutmix': 0.0, 'cutmix_minmax': None, 'mixup_prob': 1.0, 'mixup_switch_prob': 0.5, 'mixup_mode': 'batch', 'mixup_off_epoch': 0, 'smoothing': 0.1, 'train_interpolation': 'random', 'drop': 0.0, 'drop_connect': None, 'drop_path': None, 'drop_block': None, 'bn_momentum': None, 'bn_eps': None, 'sync_bn': False, 'dist_bn': 'reduce', 'split_bn': False, 'model_ema': False, 'model_ema_force_cpu': False, 'model_ema_decay': 0.9998, 'model_ema_warmup': False, 'seed': 42, 'worker_seeding': 'all', 'log_interval': 50, 'recovery_interval': 0, 'checkpoint_hist': 10, 'workers': 4, 'save_images': False, 'pin_mem': False, 'no_prefetcher': False, 'output': '', 'experiment': 'ImageNetTraining80.0-frac-1over64', 'eval_metric': 'top1', 'tta': 0, 'use_multi_epochs_loader': False, 'log_wandb': True, 'wandb_project': 'ImageNetTraining80.0-frac-1over64', 'wandb_tags': [], 'wandb_resume_id': '', 'prefetcher': True, 'distributed': True, 'world_size': 4, 'rank': 0}
+2025-01-19 00:29:40,530 INFO MainThread:139 [wandb_init.py:init():745] starting backend
+2025-01-19 00:29:40,753 INFO MainThread:139 [wandb_init.py:init():749] sending inform_init request
+2025-01-19 00:29:40,774 INFO MainThread:139 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+2025-01-19 00:29:40,774 INFO MainThread:139 [wandb_init.py:init():764] backend started and connected
+2025-01-19 00:29:40,779 INFO MainThread:139 [wandb_init.py:init():857] updated telemetry
+2025-01-19 00:29:40,803 INFO MainThread:139 [wandb_init.py:init():889] communicating run to backend with 90.0 second timeout
+2025-01-19 00:29:41,014 INFO MainThread:139 [wandb_init.py:init():941] starting run threads in backend
+2025-01-19 00:29:41,102 INFO MainThread:139 [wandb_run.py:_console_start():2420] atexit reg
+2025-01-19 00:29:41,102 INFO MainThread:139 [wandb_run.py:_redirect():2270] redirect: wrap_raw
+2025-01-19 00:29:41,103 INFO MainThread:139 [wandb_run.py:_redirect():2335] Wrapping output streams.
+2025-01-19 00:29:41,103 INFO MainThread:139 [wandb_run.py:_redirect():2360] Redirects installed.
+2025-01-19 00:29:41,105 INFO MainThread:139 [wandb_init.py:init():983] run started, returning control to user process
+2025-01-19 01:32:13,860 WARNING MsgRouterThr:139 [router.py:message_loop():75] message_loop has been closed