derekl35 HF Staff commited on
Commit
ffacc0d
·
verified ·
1 Parent(s): 33f789f

Update quantization_benchmarks.csv

Browse files
Files changed (1) hide show
  1. quantization_benchmarks.csv +37 -37
quantization_benchmarks.csv CHANGED
@@ -1,39 +1,39 @@
1
- model,quant method,truthfulqa_mc1 acc ± stderr,arc:challenge acc ± stderr,hellaswag acc,winogrande acc,average,throughput (tok/s),peak process vram (GB),calibration/quantization time,throughput w/ torch.compile,peak process vram w/ torch.compile,throughput w/ marlin,peak process vram w/ marlin
2
- Llama 3.1 8B,baseline (bf16),0.3807 ± 0.017,0.5623 ± 0.0145,0.5973 ± 0.0049,0.7372 ± 0.0124,0.569375,38.82,16.77302,,79.27,17.12954,,
3
- Llama 3.1 8B,AWQ (4 bit),0.3574 ± 0.017,0.5589 ± 0.0145,0.5869 ± 0.0049,0.7285 ± 0.0125,0.557925,43.09,8.11808,~10 minutes,,,,
4
  Llama 3.1 8B,GPTQModel (4 bit),,,,,,36.83,6.31872,~23 minutes,,,37.84,6.31872
5
- Llama 3.1 8B,AutoGPTQ (4 bit),0.3513 ± 0.0167,0.5589 ± 0.0145,0.5758 ± 0.0049,0.7103 ± 0.0127,0.549075,43.71,6.49278,~30 minutes,,,,
6
- Llama 3.1 8B,bnb (nf4),0.3660 ± 0.0169,0.5563 ± 0.0145,0.5769 ± 0.0049,0.7301 ± 0.0125,0.557325,24.35,6.44035,~1 minute,,,,
7
- Llama 3.1 8B,optimum quanto (w4a16),0.3599 ± 0.0168,0.5427 ± 0.0146,0.5856 ± 0.0049,0.7419 ± 0.0123,0.557525,31.22,6.57667,~30 seconds,,,,
8
- Llama 3.1 8B,torchao (int4wo),0.3574 ± 0.0168,0.5418 ± 0.0146,0.5855 ± 0.0049,0.7395 ± 0.0123,0.55605,24.98,6.49907,~20 seconds,85.76,6.8493,,
9
- Llama 3.1 8B,HQQ (4 bit),0.3819 ± 0.0170,0.5495 ± 0.0145,0.5859 ± 0.0049,0.7285 ± 0.0125,0.56145,34.44,6.71718,,,,,
10
- Llama 3.1 8B,HIGGS (4 bit),0.3415 ± 0.0166,0.5486 ± 0.0145,0.5836 ± 0.0049,0.7182 ± 0.0126,0.547975,28.35,6.81994,~5 minutes,,,,
11
- Llama 3.1 8B,bnb (llm.int8()),0.3770 ± 0.0170,0.5623 ± 0.0145,0.5949 ± 0.0049,0.7324 ± 0.0124,0.56665,20.75,9.70772,~20 seconds,,,,
12
- Llama 3.1 8B,HQQ (8 bit),0.3807 ± 0.0170, 0.5666 ± 0.0145, 0.5979 ± 0.0049, 0.7380 ± 0.0124,0.5708,9.07,10.6011,~80 seconds,,,,
13
- Llama 3.1 8B,optimum quanto (int8wo),0.3745 ± 0.0169, 0.5640 ± 0.0145,0.5992 ± 0.0049, 0.7372 ± 0.0124,0.568725,15.59,9.81887,~20 seconds,16.01,10.07052,,
14
- Llama 3.1 8B,torchao (int8wo),0.3770 ± 0.0170,0.5640 ± 0.0145,0.5975 ± 0.0049,0.7380 ± 0.0124,0.569125,5.98,13.07155,~30 seconds,43.79,13.66714,,
15
- Llama 3.1 8B,fbgemm (fp8),0.3807 ± 0.0170,0.5580 ± 0.0145,0.5958 ± 0.0049,0.7411 ± 0.0123,0.5689,33.83,10.00551,~30 seconds,,,,
16
- Llama 3.1 8B,compressed-tensors (fp8),0.3696 ± 0.0169,0.5589 ± 0.0145,0.5950 ± 0.0049,0.7356 ± 0.0124,0.564775,,,,,,,
17
- Llama 3.1 8B,VPTQ (2 bit),0.3011 ± 0.0161, 0.4923 ± 0.0146, 0.5258 ± 0.0050,0.6930 ± 0.0130,0.50305,32.35,5.28902,~2 hours,31.48,5.28692,,
18
- Llama 3.1 8B,AQLM + PV (2 bit),0.3244 ± 0.0164,0.5230 ± 0.0146,0.5628 ± 0.0050,0.6938 ± 0.0130,0.526,22.28,4.84023,~1 day,27.27,4.85491,,
19
  Llama 3.1 8B,GPTQModel (2 bit),,,,,,19.02,18.45284,~26 minutes,,,,
20
- Llama 3.1 8B,AutoGPTQ (2 bit),0.2546 ± 0.0153,0.1988 ± 0.0117,0.2665 ± 0.0044,0.4799 ± 0.0140,0.29995,6.25,11.02473,~26 minutes,,,,
21
- Llama 3.1 70B,baseline (bf16),0.4162 ± 0.0173,0.6732 ± 0.0137,0.6666 ± 0.0047,0.8248 ± 0.0107,0.6452,9.73,142.26869,,10.1,142.81395,,
22
- Llama 3.1 70B,AWQ (4 bit),0.4162 ± 0.0173,0.6681 ± 0.0138,0.6598 ± 0.0047,0.8193 ± 0.0108,0.64085,15.74,43.75288,~1 hour,,,,
23
- Llama 3.1 70B,GPTQModel (4 bit),,,,,#VALUE!,14.84,40.5757,,,,15.28,40.5757
24
- Llama 3.1 70B,AutoGPTQ (4 bit),0.4039 ± 0.0172,0.6655 ± 0.0138,0.6568 ± 0.0047,0.8185 ± 0.0108,0.636175,0.46,42.40022,~2 hours,,,,
25
- Llama 3.1 70B,bnb (nf4),0.4198 ± 0.0173,0.6724 ± 0.0137,0.6592 ± 0.0047,0.8098 ± 0.0110,0.6403,11.27,44.62949,~2 minutes,,,,
26
- Llama 3.1 70B,optimum quanto (w4a16),0.2326 ± 0.0148,0.2082 ± 0.0119,0.2582 ± 0.0044,0.4878 ± 0.0140,0.2967,12.97,80.39013,~2 minutes,,,,
27
- Llama 3.1 70B,torchao (int4wo),0.2350 ± 0.0148,0.2108 ± 0.0119, 0.2581 ± 0.0044, 0.4980 ± 0.0141,0.300475,10.56,41.6054,~2 minutes,18.95,42.26181,,
28
- Llama 3.1 70B,HQQ (4 bit),0.4027 ± 0.0172,0.6706 ± 0.0137, 0.6597 ± 0.0047,0.8035 ± 0.0112,0.634125,13.92,44.50366,~10 minutes,,,,
29
- Llama 3.1 70B,HIGGS (4 bit),0.2301 ± 0.0147,0.1971 ± 0.0116,0.2575 ± 0.0044,0.4893 ± 0.0140,0.2935,11.61,41.52571,~6 minutes,12.38,41.02868,,
30
- Llama 3.1 70B,bnb (llm.int8()),0.3709 ± 0.0169,0.6544 ± 0.0139,0.6382 ± 0.0048,0.7940 ± 0.0114,0.614375,6.87,74.26428,~2 minutes,,,,
31
- Llama 3.1 70B,HQQ (8 bit),0.4211 ± 0.0173,0.6732 ± 0.0137,0.6661 ± 0.0047,0.8327 ± 0.0105,0.648275,0.98,80.52435,~10 minutes,0.98,80.39013,,
32
- Llama 3.1 70B,optimum quanto (int8wo),0.3721 ± 0.0169,0.6459 ± 0.0140,0.6413 ± 0.0048,0.7979 ± 0.0113,0.6143,1.79,74.21192,~2 minutes,1.8,74.21401,,
33
- Llama 3.1 70B,torchao (int8wo),0.4198 ± 0.0173,0.6732 ± 0.0137,0.6659 ± 0.0047,0.8240 ± 0.0107,0.645725,0.65,89.85038,~2 minutes,0.65,89.84619,,
34
- Llama 3.1 70B,fbgemm (fp8),0.4162 ± 0.0173,0.6732 ± 0.0137,0.6671 ± 0.0047,0.8216 ± 0.0108,0.644525,13.61,74.04624,~6 minutes,,,,
35
- Llama 3.1 70B,compressed-tensors (fp8),0.4223 ± 0.0173,0.6741 ± 0.0137,0.6652 ± 0.0047,0.8216 ± 0.0108,0.6458,,,,,,,
36
- Llama 3.1 70B,VPTQ (2 bit),0.3721 ± 0.0169, 0.6212 ± 0.0142,0.6073 ± 0.0049, 0.7901 ± 0.0114,0.597675,6.29,24.89949,~19 hours,6.18,24.89949,,
37
- Llama 3.1 70B,AQLM + PV (2 bit),0.3905 ± 0.0171,0.6365 ± 0.0141,0.6401 ± 0.0048,0.8066 ± 0.0111,0.618425,6.75,23.12739,10-14 days,7.09,23.60764,,
38
- Llama 3.1 70B,GPTQModel (2 bit),,,,,#VALUE!,,,,,,,
39
- Llama 3.1 70B,AutoGPTQ (2 bit),0.2558 ± 0.0153,0.2807 ± 0.0131,0.3642 ± 0.0048,0.5470 ± 0.0140,0.361925,,,,,,,
 
1
+ model,quant method,truthfulqa_mc2 acc ± stderr,arc:challenge acc ± stderr,hellaswag acc,winogrande acc,average,throughput (tok/s),peak process vram (GB),calibration/quantization time,throughput w/ torch.compile,peak process vram w/ torch.compile,throughput w/ marlin,peak process vram w/ marlin
2
+ Llama 3.1 8B,baseline (bf16),0.5456 ± 0.0150,0.5623 ± 0.0145,0.5973 ± 0.0049,0.7372 ± 0.0124,0.6106,38.82,16.77302,,79.27,17.12954,,
3
+ Llama 3.1 8B,AWQ (4 bit),0.5281 ± 0.0150,0.5589 ± 0.0145,0.5869 ± 0.0049,0.7285 ± 0.0125,0.6006,43.09,8.11808,~10 minutes,,,,
4
  Llama 3.1 8B,GPTQModel (4 bit),,,,,,36.83,6.31872,~23 minutes,,,37.84,6.31872
5
+ Llama 3.1 8B,AutoGPTQ (4 bit),0.5361 ± 0.0149,0.5589 ± 0.0145,0.5758 ± 0.0049,0.7103 ± 0.0127,0.595275,43.71,6.49278,~30 minutes,,,,
6
+ Llama 3.1 8B,bnb (nf4),0.5446 ± 0.0149,0.5563 ± 0.0145,0.5769 ± 0.0049,0.7301 ± 0.0125,0.601975,24.35,6.44035,~1 minute,,,,
7
+ Llama 3.1 8B,optimum quanto (w4a16),0.5162 ± 0.0150,0.5427 ± 0.0146,0.5856 ± 0.0049,0.7419 ± 0.0123,0.5966,31.22,6.57667,~30 seconds,,,,
8
+ Llama 3.1 8B,torchao (int4wo),0.5166 ± 0.0150,0.5418 ± 0.0146,0.5855 ± 0.0049,0.7395 ± 0.0123,0.59585,24.98,6.49907,~20 seconds,85.76,6.8493,,
9
+ Llama 3.1 8B,HQQ (4 bit),0.5485 ± 0.0150,0.5495 ± 0.0145,0.5859 ± 0.0049,0.7285 ± 0.0125,0.6031,34.44,6.71718,,,,,
10
+ Llama 3.1 8B,HIGGS (4 bit),0.5214 ± 0.0149,0.5486 ± 0.0145,0.5836 ± 0.0049,0.7182 ± 0.0126,0.59295,28.35,6.81994,~5 minutes,,,,
11
+ Llama 3.1 8B,bnb (llm.int8()),0.5446 ± 0.0150,0.5623 ± 0.0145,0.5949 ± 0.0049,0.7324 ± 0.0124,0.60855,20.75,9.70772,~20 seconds,,,,
12
+ Llama 3.1 8B,HQQ (8 bit),0.5441 ± 0.0150, 0.5666 ± 0.0145, 0.5979 ± 0.0049, 0.7380 ± 0.0124,0.61165,9.07,10.6011,~80 seconds,,,,
13
+ Llama 3.1 8B,optimum quanto (int8wo),0.5436 ± 0.0150, 0.5640 ± 0.0145,0.5992 ± 0.0049, 0.7372 ± 0.0124,0.611,15.59,9.81887,~20 seconds,16.01,10.07052,,
14
+ Llama 3.1 8B,torchao (int8wo),0.5449 ± 0.0150,0.5640 ± 0.0145,0.5975 ± 0.0049,0.7380 ± 0.0124,0.6111,5.98,13.07155,~30 seconds,43.79,13.66714,,
15
+ Llama 3.1 8B,fbgemm (fp8),0.5430 ± 0.0150,0.5580 ± 0.0145,0.5958 ± 0.0049,0.7411 ± 0.0123,0.609475,33.83,10.00551,~30 seconds,,,,
16
+ Llama 3.1 8B,compressed-tensors (fp8),0.5398 ± 0.0151,0.5589 ± 0.0145,0.5950 ± 0.0049,0.7356 ± 0.0124,0.607325,,,,,,,
17
+ Llama 3.1 8B,VPTQ (2 bit),0.4543 ± 0.0149, 0.4923 ± 0.0146, 0.5258 ± 0.0050,0.6930 ± 0.0130,0.54135,32.35,5.28902,~2 hours,31.48,5.28692,,
18
+ Llama 3.1 8B,AQLM + PV (2 bit),0.5036 ± 0.0148,0.5230 ± 0.0146,0.5628 ± 0.0050,0.6938 ± 0.0130,0.5708,22.28,4.84023,~1 day,27.27,4.85491,,
19
  Llama 3.1 8B,GPTQModel (2 bit),,,,,,19.02,18.45284,~26 minutes,,,,
20
+ Llama 3.1 8B,AutoGPTQ (2 bit),0.5127 ± 0.0150,0.1988 ± 0.0117,0.2665 ± 0.0044,0.4799 ± 0.0140,0.364475,6.25,11.02473,~26 minutes,,,,
21
+ Llama 3.1 70B,baseline (bf16),0.6068 ± 0.0147,0.6732 ± 0.0137,0.6666 ± 0.0047,0.8248 ± 0.0107,0.69285,9.73,142.26869,,10.1,142.81395,,
22
+ Llama 3.1 70B,AWQ (4 bit),0.5706 ± 0.0150,0.6681 ± 0.0138,0.6598 ± 0.0047,0.8193 ± 0.0108,0.67945,15.74,43.75288,~1 hour,,,,
23
+ Llama 3.1 70B,GPTQModel (4 bit),,,,,,14.84,40.5757,,,,15.28,40.5757
24
+ Llama 3.1 70B,AutoGPTQ (4 bit),0.5937 ± 0.0147,0.6655 ± 0.0138,0.6568 ± 0.0047,0.8185 ± 0.0108,0.683625,0.46,42.40022,~2 hours,,,,
25
+ Llama 3.1 70B,bnb (nf4),0.5939 ± 0.0148,0.6724 ± 0.0137,0.6592 ± 0.0047,0.8098 ± 0.0110,0.683825,11.27,44.62949,~2 minutes,,,,
26
+ Llama 3.1 70B,optimum quanto (w4a16),0.4847 ± 0.0164,0.2082 ± 0.0119,0.2582 ± 0.0044,0.4878 ± 0.0140,0.359725,12.97,80.39013,~2 minutes,,,,
27
+ Llama 3.1 70B,torchao (int4wo),0.4847 ± 0.0164,0.2108 ± 0.0119, 0.2581 ± 0.0044, 0.4980 ± 0.0141,0.3629,10.56,41.6054,~2 minutes,18.95,42.26181,,
28
+ Llama 3.1 70B,HQQ (4 bit),0.5882 ± 0.0146,0.6706 ± 0.0137, 0.6597 ± 0.0047,0.8035 ± 0.0112,0.6805,13.92,44.50366,~10 minutes,,,,
29
+ Llama 3.1 70B,HIGGS (4 bit),0.4871 ± 0.0163,0.1971 ± 0.0116,0.2575 ± 0.0044,0.4893 ± 0.0140,0.35775,11.61,41.52571,~6 minutes,12.38,41.02868,,
30
+ Llama 3.1 70B,bnb (llm.int8()),0.5604 ± 0.0169,0.6544 ± 0.0139,0.6382 ± 0.0048,0.7940 ± 0.0114,0.66175,6.87,74.26428,~2 minutes,,,,
31
+ Llama 3.1 70B,HQQ (8 bit),0.6112 ± 0.0146,0.6732 ± 0.0137,0.6661 ± 0.0047,0.8327 ± 0.0105,0.6958,0.98,80.52435,~10 minutes,0.98,80.39013,,
32
+ Llama 3.1 70B,optimum quanto (int8wo),0.5591 ± 0.0150,0.6459 ± 0.0140,0.6413 ± 0.0048,0.7979 ± 0.0113,0.66105,1.79,74.21192,~2 minutes,1.8,74.21401,,
33
+ Llama 3.1 70B,torchao (int8wo),0.6094 ± 0.0146,0.6732 ± 0.0137,0.6659 ± 0.0047,0.8240 ± 0.0107,0.693125,0.65,89.85038,~2 minutes,0.65,89.84619,,
34
+ Llama 3.1 70B,fbgemm (fp8),0.6075 ± 0.0146,0.6732 ± 0.0137,0.6671 ± 0.0047,0.8216 ± 0.0108,0.69235,13.61,74.04624,~6 minutes,,,,
35
+ Llama 3.1 70B,compressed-tensors (fp8),0.6062 ± 0.0146,0.6741 ± 0.0137,0.6652 ± 0.0047,0.8216 ± 0.0108,0.691775,,,,,,,
36
+ Llama 3.1 70B,VPTQ (2 bit),0.5451 ± 0.0150, 0.6212 ± 0.0142,0.6073 ± 0.0049, 0.7901 ± 0.0114,0.640925,6.29,24.89949,~19 hours,6.18,24.89949,,
37
+ Llama 3.1 70B,AQLM + PV (2 bit),0.5706 ± 0.0150,0.6365 ± 0.0141,0.6401 ± 0.0048,0.8066 ± 0.0111,0.66345,6.75,23.12739,10-14 days,7.09,23.60764,,
38
+ Llama 3.1 70B,GPTQModel (2 bit),,,,,,,,,,,,
39
+ Llama 3.1 70B,AutoGPTQ (2 bit),0.4556 ± 0.0147,0.2807 ± 0.0131,0.3642 ± 0.0048,0.5470 ± 0.0140,0.411875,,,,,,,