{
  "models_auto": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 80,
    "time_spent": "5.47, 1.74, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409911",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410689"
    }
  },
  "models_bert": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 2 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 239,
    "time_spent": "0:02:22, 0:02:16, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4216) AssertionError: Tensor-likes are not equal!"
        }
      ],
      "multi": [
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/bert/test_modeling_bert.py::BertModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4216) AssertionError: Tensor-likes are not equal!"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409938",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410716"
    }
  },
  "models_clip": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 288,
    "time_spent": "0:03:29, 0:03:30, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410734",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409965"
    }
  },
  "models_detr": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 77,
    "time_spent": "0:01:13, 0:01:49, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410020",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410868"
    }
  },
  "models_gemma3": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 12, "multi": 13 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 341,
    "time_spent": "0:07:52, 0:09:43, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4219) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_export_text_only_with_hybrid_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_function <built-in function scaled_dot_product_attention>(*(FakeTensor(..., size=(1, 4, 1, 256), grad_fn=<AddBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>)), **{'attn_mask': FakeTensor(..., size=(1, 1, 1, 512), dtype=torch.bool), 'dropout_p': 0.0, 'scale': 0.0625, 'is_causal': False}): got RuntimeError('Attempting to broadcast a dimension of length 512 at -1! Mismatching argument at index 1 had torch.Size([1, 1, 1, 512]); but expected shape should be broadcastable to [1, 4, 1, 4096]')"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_1_sdpa",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (4826) must match the existing size (4807) at non-singleton dimension 3. Target sizes: [2, 4, 4807, 4826]. Tensor sizes: [2, 1, 4807, 4807]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_2_eager",
          "trace": "(line 265) RuntimeError: The size of tensor a (4826) must match the size of tensor b (4807) at non-singleton dimension 3"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_1b_text_only",
          "trace": "(line 715) AssertionError: Lists differ: ['Wri[57 chars]s, a silent stream,\\nInto the neural net, a wa[42 chars],\\n'] != ['Wri[57 chars]s, a river deep,\\nWith patterns hidden, secret[46 chars]ing']"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch",
          "trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [264 chars]cow\"] != ['use[114 chars]rown and white cow standing on a sandy beach n[272 chars]ach']"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (1646) must match the existing size (1617) at non-singleton dimension 3. Target sizes: [2, 8, 1617, 1646]. Tensor sizes: [2, 1, 1617, 1617]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_bf16",
          "trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [55 chars]ike'] != ['use[114 chars]rown and white cow standing on a sandy beach w[68 chars]oks']"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_crops",
          "trace": "(line 715) AssertionError: Lists differ: [\"use[251 chars]. There's a blue sky with some white clouds in the background\"] != [\"use[251 chars]. There's a bright blue sky with some white clouds in the\"]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_multiimage",
          "trace": "(line 715) AssertionError: Lists differ: [\"use[122 chars]n\\n**Main Features:**\\n\\n* **Chinese Archway[19 chars]ent\"] != [\"use[122 chars]n\\n**Overall Scene:**\\n\\nIt looks like a stree[18 chars]nt,\"]"
        }
      ],
      "multi": [
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4219) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_model_parallelism",
          "trace": "(line 925) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_export_text_only_with_hybrid_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_function <built-in function scaled_dot_product_attention>(*(FakeTensor(..., size=(1, 4, 1, 256), grad_fn=<AddBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>), FakeTensor(..., size=(1, 4, 4096, 256), grad_fn=<CloneBackward0>)), **{'attn_mask': FakeTensor(..., size=(1, 1, 1, 512), dtype=torch.bool), 'dropout_p': 0.0, 'scale': 0.0625, 'is_causal': False}): got RuntimeError('Attempting to broadcast a dimension of length 512 at -1! Mismatching argument at index 1 had torch.Size([1, 1, 1, 512]); but expected shape should be broadcastable to [1, 4, 1, 4096]')"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_1_sdpa",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (4826) must match the existing size (4807) at non-singleton dimension 3. Target sizes: [2, 4, 4807, 4826]. Tensor sizes: [2, 1, 4807, 4807]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_generation_beyond_sliding_window_2_eager",
          "trace": "(line 265) RuntimeError: The size of tensor a (4826) must match the size of tensor b (4807) at non-singleton dimension 3"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_1b_text_only",
          "trace": "(line 715) AssertionError: Lists differ: ['Wri[57 chars]s, a silent stream,\\nInto the neural net, a wa[42 chars],\\n'] != ['Wri[57 chars]s, a river deep,\\nWith patterns hidden, secret[46 chars]ing']"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch",
          "trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [264 chars]cow\"] != ['use[114 chars]rown and white cow standing on a sandy beach n[272 chars]ach']"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
          "trace": "(line 81) RuntimeError: The expanded size of the tensor (1646) must match the existing size (1617) at non-singleton dimension 3. Target sizes: [2, 8, 1617, 1646]. Tensor sizes: [2, 1, 1617, 1617]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_bf16",
          "trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [55 chars]ike'] != ['use[114 chars]rown and white cow standing on a sandy beach w[68 chars]oks']"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_crops",
          "trace": "(line 715) AssertionError: Lists differ: [\"use[251 chars]. There's a blue sky with some white clouds in the background\"] != [\"use[251 chars]. There's a bright blue sky with some white clouds in the\"]"
        },
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_multiimage",
          "trace": "(line 715) AssertionError: Lists differ: [\"use[122 chars]n\\n**Main Features:**\\n\\n* **Chinese Archway[19 chars]ent\"] != [\"use[122 chars]n\\n**Overall Scene:**\\n\\nIt looks like a stree[18 chars]nt,\"]"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410076",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410943"
    }
  },
  "models_gemma3n": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 0,
    "time_spent": ".56, .97, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410944",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410122"
    }
  },
  "models_got_ocr2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 2 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 146,
    "time_spent": "0:01:56, 0:01:39, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/got_ocr2/test_modeling_got_ocr2.py::GotOcr2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/got_ocr2/test_modeling_got_ocr2.py::GotOcr2ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        }
      ],
      "single": [
        {
          "line": "tests/models/got_ocr2/test_modeling_got_ocr2.py::GotOcr2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410969",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410123"
    }
  },
  "models_gpt2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 249,
    "time_spent": "0:04:53, 0:02:05, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ],
      "single": [
        {
          "line": "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410990",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410088"
    }
  },
  "models_internvl": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 3 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 252,
    "time_spent": "0:02:54, 0:02:55, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLLlamaIntegrationTest::test_llama_small_model_integration_forward",
          "trace": "(line 727) AssertionError: False is not true : Actual logits: tensor([ -9.8750, -0.4885, 1.4668, -10.3359, -10.3359], dtype=torch.float16)"
        }
      ],
      "single": [
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLLlamaIntegrationTest::test_llama_small_model_integration_forward",
          "trace": "(line 727) AssertionError: False is not true : Actual logits: tensor([ -9.8750, -0.4885, 1.4668, -10.3359, -10.3359], dtype=torch.float16)"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527411014",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410165"
    }
  },
  "models_llama": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 3 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 232,
    "time_spent": "0:10:51, 0:23:47, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaIntegrationTest::test_model_7b_logits_bf16",
          "trace": "(line 727) AssertionError: False is not true"
        }
      ],
      "single": [
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llama/test_modeling_llama.py::LlamaIntegrationTest::test_model_7b_logits_bf16",
          "trace": "(line 727) AssertionError: False is not true"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527411041",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410199"
    }
  },
  "models_llava": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 4, "multi": 5 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 202,
    "time_spent": "0:02:38, 0:02:51, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_flex_attention_with_grads",
          "trace": "(line 727) AssertionError: False is not true"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4197) IndexError: The shape of the mask [3, 23] at index 1 does not match the shape of the indexed tensor [3, 3, 8, 8] at index 1"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
          "trace": "(line 399) importlib.metadata.PackageNotFoundError: No package metadata was found for bitsandbytes"
        }
      ],
      "single": [
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_flex_attention_with_grads",
          "trace": "(line 727) AssertionError: False is not true"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4197) IndexError: The shape of the mask [3, 23] at index 1 does not match the shape of the indexed tensor [3, 3, 8, 8] at index 1"
        },
        {
          "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
          "trace": "(line 399) importlib.metadata.PackageNotFoundError: No package metadata was found for bitsandbytes"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527411134",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410218"
    }
  },
  "models_mistral3": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 3 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 198,
    "time_spent": "0:14:37, 0:05:43, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_generate",
          "trace": "(line 715) AssertionError: 'The [14 chars] two cats lying on a pink surface, which appea[21 chars] bed' != 'The [14 chars] two tabby cats lying on a pink surface, which[23 chars]n or'"
        }
      ],
      "multi": [
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_generate",
          "trace": "(line 715) AssertionError: 'The [14 chars] two cats lying on a pink surface, which appea[21 chars] bed' != 'The [14 chars] two tabby cats lying on a pink surface, which[23 chars]n or'"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409417",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410265"
    }
  },
  "models_modernbert": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 5, "multi": 5 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 132,
    "time_spent": "0:02:22, 0:01:49, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_export",
          "trace": "(line 715) AssertionError: Lists differ: ['mechanic', 'lawyer', 'teacher', 'waiter', 'doctor'] != ['lawyer', 'mechanic', 'teacher', 'doctor', 'waiter']"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_masked_lm",
          "trace": "(line 401) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_no_head",
          "trace": "(line 423) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_sequence_classification",
          "trace": "(line 469) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_token_classification",
          "trace": "(line 446) AssertionError: Tensor-likes are not close!"
        }
      ],
      "single": [
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_export",
          "trace": "(line 715) AssertionError: Lists differ: ['mechanic', 'lawyer', 'teacher', 'waiter', 'doctor'] != ['lawyer', 'mechanic', 'teacher', 'doctor', 'waiter']"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_masked_lm",
          "trace": "(line 401) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_no_head",
          "trace": "(line 423) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_sequence_classification",
          "trace": "(line 469) AssertionError: Tensor-likes are not close!"
        },
        {
          "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelIntegrationTest::test_inference_token_classification",
          "trace": "(line 446) AssertionError: Tensor-likes are not close!"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410294",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409446"
    }
  },
  "models_qwen2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 3 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 214,
    "time_spent": "0:02:23, 0:02:39, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_export_static_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_method index_copy_(*(FakeTensor(..., size=(1, 2, 26, 64), dtype=torch.bfloat16), 2, FakeTensor(..., device='cuda:0', size=(1,), dtype=torch.int64), FakeTensor(..., device='cuda:0', size=(1, 2, 1, 64), dtype=torch.bfloat16,"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_export_static_cache",
          "trace": "(line 1642) torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_method index_copy_(*(FakeTensor(..., size=(1, 2, 26, 64), dtype=torch.bfloat16), 2, FakeTensor(..., device='cuda:0', size=(1,), dtype=torch.int64), FakeTensor(..., device='cuda:0', size=(1, 2, 1, 64), dtype=torch.bfloat16,"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410392",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409572"
    }
  },
  "models_qwen2_5_omni": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 3 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 167,
    "time_spent": "0:06:59, 0:02:55, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_model_parallelism",
          "trace": "(line 715) AssertionError: Items in the second set but not the first:"
        },
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 1305) AttributeError: 'DynamicCache' object has no attribute 'layers'"
        },
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
          "trace": "(line 715) AssertionError: Lists differ: [\"sys[96 chars]ant\\nsystem\\nYou are a helpful assistant.\\nuse[129 chars]er.\"] != [\"sys[96 chars]ant\\nThe sound is glass shattering, and the do[198 chars]er.\"]"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
          "trace": "(line 715) AssertionError: Lists differ: [\"sys[96 chars]ant\\nsystem\\nYou are a helpful assistant.\\nuse[129 chars]er.\"] != [\"sys[96 chars]ant\\nThe sound is glass shattering, and the do[198 chars]er.\"]"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410407",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409568"
    }
  },
  "models_qwen2_5_vl": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 2 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 204,
    "time_spent": "0:03:59, 0:03:58, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 406) AssertionError: Tensor-likes are not equal!"
        },
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
          "trace": "(line 715) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
          "trace": "(line 715) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410397",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409587"
    }
  },
  "models_smolvlm": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 323,
    "time_spent": "0:02:49, 0:02:35, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/smolvlm/test_modeling_smolvlm.py::SmolVLMForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ],
      "multi": [
        {
          "line": "tests/models/smolvlm/test_modeling_smolvlm.py::SmolVLMForConditionalGenerationModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409653",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410495"
    }
  },
  "models_t5": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 3, "multi": 4 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 254,
    "time_spent": "0:05:05, 0:03:30, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
          "trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_small_integration_test",
          "trace": "(line 727) AssertionError: False is not true"
        }
      ],
      "single": [
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_eager_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4140) KeyError: 'eager'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
          "trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_small_integration_test",
          "trace": "(line 727) AssertionError: False is not true"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410524",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409705"
    }
  },
  "models_vit": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 135,
    "time_spent": "0:02:19, 0:01:21, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410589",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409755"
    }
  },
  "models_wav2vec2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 0,
    "time_spent": "0.96, .03, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410594",
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409797"
    }
  },
  "models_whisper": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "success": 0,
    "time_spent": ".19, .20, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527409794",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16460430974/job/46527410606"
    }
  }
}