{
  "models_auto": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 226,
    "time_spent": "4.66, 6.10, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561673",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561472"
    }
  },
  "models_bert": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 2, "multi": 2},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 527,
    "time_spent": "0:01:58, 0:02:00, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4216) AssertionError: Tensor-likes are not equal!"
        }
      ],
      "multi": [
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4216) AssertionError: Tensor-likes are not equal!"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561709",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561482"
    }
  },
  "models_clip": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 660,
    "time_spent": "0:02:24, 0:02:20, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561994",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562125"
    }
  },
  "models_detr": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 177,
    "time_spent": "0:01:14, 0:01:19, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562517",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562397"
    }
  },
  "models_gemma3": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 7, "multi": 8},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 499,
    "time_spent": "0:07:50, 0:07:52, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4216) AssertionError: Tensor-likes are not equal!"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_export_text_only_with_hybrid_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_function <built-in function scaled_dot_product_attention>(*(FakeTensor(..., size=(1, 4, 1, 256), grad_fn=<AddBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>)), **{'attn_mask': FakeTensor(..., size=(1, 1, 1, 512), dtype=torch.bool), 'dropout_p': 0.0, 'scale': 0.0625, 'is_causal': False}): got RuntimeError('Attempting to broadcast a dimension of length 512 at -1! Mismatching argument at index 1 had torch.Size([1, 1, 1, 512]); but expected shape should be broadcastable to [1, 4, 1, 4096]')"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_1_sdpa",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (4826) must match the existing size (4807) at non-singleton dimension 3. Target sizes: [2, 4, 4807, 4826]. Tensor sizes: [2, 1, 4807, 4807]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_2_eager",
          "trace": "(line 265) RuntimeError: The size of tensor a (4826) must match the size of tensor b (4807) at non-singleton dimension 3"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (1646) must match the existing size (1617) at non-singleton dimension 3. Target sizes: [2, 8, 1617, 1646]. Tensor sizes: [2, 1, 1617, 1617]"
        }
      ],
      "multi": [
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4219) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_model_parallelism",
          "trace": "(line 925) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_export_text_only_with_hybrid_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_function <built-in function scaled_dot_product_attention>(*(FakeTensor(..., size=(1, 4, 1, 256), grad_fn=<AddBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>)), **{'attn_mask': FakeTensor(..., size=(1, 1, 1, 512), dtype=torch.bool), 'dropout_p': 0.0, 'scale': 0.0625, 'is_causal': False}): got RuntimeError('Attempting to broadcast a dimension of length 512 at -1! Mismatching argument at index 1 had torch.Size([1, 1, 1, 512]); but expected shape should be broadcastable to [1, 4, 1, 4096]')"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_1_sdpa",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (4826) must match the existing size (4807) at non-singleton dimension 3. Target sizes: [2, 4, 4807, 4826]. Tensor sizes: [2, 1, 4807, 4807]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_2_eager",
          "trace": "(line 265) RuntimeError: The size of tensor a (4826) must match the size of tensor b (4807) at non-singleton dimension 3"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (1646) must match the existing size (1617) at non-singleton dimension 3. Target sizes: [2, 8, 1617, 1646]. Tensor sizes: [2, 1, 1617, 1617]"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563053",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562857"
    }
  },
  "models_gemma3n": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 2},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 286,
    "time_spent": "0:02:29, 0:02:32, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ],
      "single": [
        {
          "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562955",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563061"
    }
  },
  "models_got_ocr2": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 2},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 254,
    "time_spent": "0:02:02, 0:02:15, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/got_ocr2/test_modeling_got_ocr2.py::GotOcr2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/got_ocr2/test_modeling_got_ocr2.py::GotOcr2ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ],
      "single": [
        {
          "line": "tests/models/got_ocr2/test_modeling_got_ocr2.py::GotOcr2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562995",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563212"
    }
  },
  "models_gpt2": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 1},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 487,
    "time_spent": "0:02:23, 0:02:38, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ],
      "single": [
        {
          "line": "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563001",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563255"
    }
  },
  "models_internvl": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 2, "multi": 3},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 356,
    "time_spent": "0:05:48, 0:04:49, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads",
          "trace": "(line 439) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfResources: out of resource: shared memory, Required: 106496, Hardware limit: 101376. Reducing block sizes or `num_stages` may help."
        },
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ],
      "single": [
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads",
          "trace": "(line 439) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfResources: out of resource: shared memory, Required: 106496, Hardware limit: 101376. Reducing block sizes or `num_stages` may help."
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563553",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563712"
    }
  },
  "models_llama": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 2},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 478,
    "time_spent": "0:04:05, 0:03:53, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ],
      "single": [
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563871",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526564103"
    }
  },
  "models_llava": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 3, "multi": 4},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 346,
    "time_spent": "0:10:11, 0:09:28, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_flex_attention_with_grads",
          "trace": "(line 687) AssertionError: False is not true"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4197) IndexError: The shape of the mask [3, 23] at index 1 does not match the shape of the indexed tensor [3, 3, 8, 8] at index 1"
        }
      ],
      "single": [
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_flex_attention_with_grads",
          "trace": "(line 687) AssertionError: False is not true"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4197) IndexError: The shape of the mask [3, 23] at index 1 does not match the shape of the indexed tensor [3, 3, 8, 8] at index 1"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526564002",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526564108"
    }
  },
  "models_mistral3": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 2},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 286,
    "time_spent": "0:10:06, 0:09:57, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ],
      "multi": [
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561480",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561618"
    }
  },
  "models_modernbert": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 5, "multi": 5},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 164,
    "time_spent": "0:01:29, 0:01:27, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_export",
          "trace": "(line 675) AssertionError: Lists differ: ['mechanic', 'lawyer', 'teacher', 'waiter', 'doctor'] != ['lawyer', 'mechanic', 'teacher', 'doctor', 'waiter']"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_masked_lm",
          "trace": "(line 401) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_no_head",
          "trace": "(line 423) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_sequence_classification",
          "trace": "(line 469) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_token_classification",
          "trace": "(line 446) AssertionError: Tensor-likes are not close!"
        }
      ],
      "single": [
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_export",
          "trace": "(line 675) AssertionError: Lists differ: ['mechanic', 'lawyer', 'teacher', 'waiter', 'doctor'] != ['lawyer', 'mechanic', 'teacher', 'doctor', 'waiter']"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_masked_lm",
          "trace": "(line 401) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_no_head",
          "trace": "(line 423) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_sequence_classification",
          "trace": "(line 469) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_token_classification",
          "trace": "(line 446) AssertionError: Tensor-likes are not close!"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561668",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526561515"
    }
  },
  "models_qwen2": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 2, "multi": 3},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 438,
    "time_spent": "0:02:17, 0:02:18, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_export_static_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_method index_copy_(*(FakeTensor(..., size=(1, 2, 26, 64), dtype=torch.bfloat16), 2, FakeTensor(..., device='cuda:0', size=(1,), dtype=torch.int64), FakeTensor(..., device='cuda:0', size=(1, 2, 1, 64), dtype=torch.bfloat16,"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_export_static_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_method index_copy_(*(FakeTensor(..., size=(1, 2, 26, 64), dtype=torch.bfloat16), 2, FakeTensor(..., device='cuda:0', size=(1,), dtype=torch.int64), FakeTensor(..., device='cuda:0', size=(1, 2, 1, 64), dtype=torch.bfloat16,"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562376",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562270"
    }
  },
  "models_qwen2_5_omni": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 5},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 277,
    "time_spent": "0:03:01, 0:03:21, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_model_parallelism",
          "trace": "(line 675) AssertionError: Items in the second set but not the first:"
        },
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
          "trace": "(line 675) AssertionError: Lists differ: [\"sys[96 chars]ant\\nsystem\\nYou are a helpful assistant.\\nuse[129 chars]er.\"] != [\"sys[96 chars]ant\\nThe sound is glass shattering, and the do[198 chars]er.\"]"
        },
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_multiturn",
          "trace": "(line 849) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.18 GiB of which 6.50 MiB is free. Process 51940 has 22.17 GiB memory in use. Of the allocated memory 21.74 GiB is allocated by PyTorch, and 27.83 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
        },
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_w_audio",
          "trace": "(line 1000) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.18 GiB of which 8.50 MiB is free. Process 51940 has 22.17 GiB memory in use. Of the allocated memory 21.75 GiB is allocated by PyTorch, and 17.78 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
          "trace": "(line 675) AssertionError: Lists differ: [\"sys[96 chars]ant\\nsystem\\nYou are a helpful assistant.\\nuse[129 chars]er.\"] != [\"sys[96 chars]ant\\nThe sound is glass shattering, and the do[198 chars]er.\"]"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562375",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562289"
    }
  },
  "models_qwen2_5_vl": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 1},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 311,
    "time_spent": "0:03:25, 0:03:29, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
          "trace": "(line 675) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
          "trace": "(line 675) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562382",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562290"
    }
  },
  "models_smolvlm": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 1, "multi": 1},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 499,
    "time_spent": "0:01:55, 0:01:47, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/smolvlm/test_modeling_smolvlm.py::SmolVLMForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ],
      "multi": [
        {
          "line": "tests/models/smolvlm/test_modeling_smolvlm.py::SmolVLMForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562675",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562798"
    }
  },
  "models_t5": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 2, "multi": 3},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 592,
    "time_spent": "0:03:34, 0:03:41, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
          "trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
        }
      ],
      "single": [
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
          "trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563047",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526562939"
    }
  },
  "models_vit": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 217,
    "time_spent": "7.34, 0:01:09, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563537",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563397"
    }
  },
  "models_wav2vec2": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 4, "multi": 4},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 672,
    "time_spent": "0:04:46, 0:04:23, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_inference_mms_1b_all",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_invalid_pool",
          "trace": "(line 675) AssertionError: Traceback (most recent call last):"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_pool",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        }
      ],
      "single": [
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_inference_mms_1b_all",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_invalid_pool",
          "trace": "(line 675) AssertionError: Traceback (most recent call last):"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_pool",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563711",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563582"
    }
  },
  "models_whisper": {
    "failed": {
      "PyTorch": {"unclassified": 0, "single": 8, "multi": 11},
      "TensorFlow": {"unclassified": 0, "single": 0, "multi": 0},
      "Flax": {"unclassified": 0, "single": 0, "multi": 0},
      "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
      "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
      "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
      "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
      "Auto": {"unclassified": 0, "single": 0, "multi": 0},
      "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
      "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
    },
    "success": 1010,
    "time_spent": "0:12:29, 0:14:19, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation_multilingual",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[272 chars]ting of classics, Sicilian, nade door variatio[8147 chars]le!'] != [\" Fo[272 chars]ting a classic Sicilian, nade door variation o[8150 chars]le!']"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard_prev_cond",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[422 chars]to a fisher shows in lip-nitsky attack that cu[7903 chars]le!\"] != [\" Fo[422 chars]to a Fisher shows in lip-nitsky attack that cu[7918 chars]le.\"]"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[268 chars]ating, so soft, it would make JD power and her[196 chars]ke.\"] != [\" Fo[268 chars]ating so soft, it would make JD power and her [195 chars]ke.\"]"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperStandaloneDecoderModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ],
      "multi": [
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_forced_decoder_ids",
          "trace": "(line 713) requests.exceptions.ReadTimeout: (ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 13cb0b08-c261-4ca3-a58f-91a2f3e327ed)')"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation_multilingual",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[272 chars]ting of classics, Sicilian, nade door variatio[8147 chars]le!'] != [\" Fo[272 chars]ting a classic Sicilian, nade door variation o[8150 chars]le!']"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard_prev_cond",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[422 chars]to a fisher shows in lip-nitsky attack that cu[7903 chars]le!\"] != [\" Fo[422 chars]to a Fisher shows in lip-nitsky attack that cu[7918 chars]le.\"]"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[268 chars]ating, so soft, it would make JD power and her[196 chars]ke.\"] != [\" Fo[268 chars]ating so soft, it would make JD power and her [195 chars]ke.\"]"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperStandaloneDecoderModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperStandaloneDecoderModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563737",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460401119/job/46526563862"
    }
  }
} | |
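
A minimal sketch of how this report might be consumed, assuming the JSON above is saved as report.json (the filename and the printed summary are illustrative, not part of the CI output):

import json

# Load the CI report (assumption: saved locally as report.json).
with open("report.json") as f:
    report = json.load(f)

for model, data in report.items():
    # "failed" is broken down by framework (PyTorch, TensorFlow, ...);
    # sum the single-GPU and multi-GPU failure counts across frameworks.
    single = sum(fw["single"] for fw in data["failed"].values())
    multi = sum(fw["multi"] for fw in data["failed"].values())
    if single or multi:
        print(f"{model}: {single} single-GPU / {multi} multi-GPU failures, "
              f"{data['success']} passed")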