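# GPT-J summarization benchmark config: accuracy targets and run scenarios
# for the cnn_dailymail task. The benchmark names (gptj-99, gptj-99.9) and
# the ROUGE / gen_len metrics match the MLPerf Inference GPT-J conventions.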
benchmarks:
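  # Reference ROUGE scores and aggregate generation length (gen_len) per
  # target. gptj is the full-precision reference; gptj-99 and gptj-99.9 are
  # 99% and 99.9% of its scores (e.g. rouge1: 42.9865 * 0.99 = 42.556635).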
  gptj-99:
    "rouge1": 42.556635
    "rouge2": 19.922265
    "rougeL": 29.688219
    "gen_len": 3615191
  gptj-99.9:
    "rouge1": 42.9435135
    "rouge2": 20.1033765
    "rougeL": 29.9581119
    "gen_len": 3615191
  gptj:
    "rouge1": 42.9865
    "rouge2": 20.1235
    "rougeL": 29.9881
    "gen_len": 3615191
scenarios:
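  # Each scenario ties one benchmark target to a launch command, a numeric
  # precision, and a batch size. All three run the gpt-j code on
  # cnn_dailymail with 8 workers; only precision, user_conf, and batch
  # size differ.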
  gptj-99.9-bf16:
    dataset: cnn_dailymail
    code_dir: gpt-j
    benchmark: gptj-99.9
    command: python main.py --device socket --num_workers 8 --user_conf configs/bf16.conf
    precision: bf16
    batch_size: 12
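  # The two fp8 scenarios below share one command; the env vars appear to
  # be Intel Gaudi (Habana) knobs: PT_USE_FP8_143 presumably selects the
  # 1-4-3 (E4M3) FP8 format, and ENABLE_EXPERIMENTAL_FLAGS unlocks the
  # experimental UPDATE_MME_OUTPUT_PRECISION_FILTER graph-compiler setting.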
  gptj-99-fp8:
    dataset: cnn_dailymail
    code_dir: gpt-j
    benchmark: gptj-99
    command: PT_USE_FP8_143=1 UPDATE_MME_OUTPUT_PRECISION_FILTER="v_proj,matmul_av" ENABLE_EXPERIMENTAL_FLAGS=true python main.py -qf quantization/configuration/examples/quant_on.json --device socket --num_workers 8 --user_conf configs/fp8-99.conf --dtype float8
    precision: fp8
    batch_size: 32
  gptj-99.9-fp8:
    dataset: cnn_dailymail
    code_dir: gpt-j
    benchmark: gptj-99.9
    command: PT_USE_FP8_143=1 UPDATE_MME_OUTPUT_PRECISION_FILTER="v_proj,matmul_av" ENABLE_EXPERIMENTAL_FLAGS=true python main.py -qf quantization/configuration/examples/quant_on.json --device socket --num_workers 8 --user_conf configs/fp8-99.conf --dtype float8
    precision: fp8
    batch_size: 32