hf-transformers-bot committed on
Commit 1fe60f6 · verified · 1 Parent(s): 533b350

Upload benchmark results for run 17983410436

2025-09-24/17983410436/benchmark_results/Llama-2-7b-hf/Llama-2-7b-hf_benchmark_20250924_164948.json ADDED
@@ -0,0 +1,1175 @@
{
  "model_name": "Llama-2-7b-hf",
  "benchmark_scenarios": [
    {
      "scenario_name": "eager_eager_attn",
      "metadata": {
        "timestamp": "2025-09-24T16:43:44.476550",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "eager", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "eager", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": null, "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "eager", "sdpa_backend": null, "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.6565966796875, 3.666757568359375, 3.66456298828125, 3.665447509765625, 3.65877490234375], "mean": 3.6624279296875004, "median": 3.66456298828125, "std": 0.003994250753586616, "min": 3.6565966796875, "max": 3.666757568359375, "p25": 3.65877490234375, "p75": 3.665447509765625, "p90": 3.6662335449218753, "p95": 3.666495556640625, "p99": 3.666705166015625, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.03796169662475586, 0.03807360076904297, 0.03708899307250976, 0.038081184387207034, 0.03862006378173828], "mean": 0.037965107727050786, "median": 0.03807360076904297, "std": 0.0004943279252249378, "min": 0.03708899307250976, "max": 0.03862006378173828, "p25": 0.03796169662475586, "p75": 0.038081184387207034, "p90": 0.03840451202392578, "p95": 0.03851228790283203, "p99": 0.03859850860595703, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [27.34783427319258, 27.27205116119613, 27.288383449755333, 27.28179839803358, 27.331552956685492], "mean": 27.30432404777262, "median": 27.288383449755333, "std": 0.029791273775972214, "min": 27.27205116119613, "max": 27.34783427319258, "p25": 27.28179839803358, "p75": 27.331552956685492, "p90": 27.341321746589745, "p95": 27.344578009891162, "p99": 27.347183020532295, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.036565966796875, 0.03666757568359375, 0.036645629882812496, 0.03665447509765625, 0.036587749023437505], "mean": 0.036624279296875, "median": 0.036645629882812496, "std": 3.994250753586461e-05, "min": 0.036565966796875, "max": 0.03666757568359375, "p25": 0.036587749023437505, "p75": 0.03665447509765625, "p90": 0.03666233544921875, "p95": 0.036664955566406254, "p99": 0.036667051660156255, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 90.73913043478261, "gpu_utilization_max": 94, "gpu_utilization_min": 86, "gpu_memory_used_mean": 13253, "gpu_memory_used_max": 13253, "gpu_memory_used_min": 13253, "sample_count": 92, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "compiled_compile_max-autotune_eager_attn",
      "metadata": {
        "timestamp": "2025-09-24T16:44:19.874411",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "compiled", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "compiled", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": "max-autotune", "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "eager", "sdpa_backend": null, "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [7.8454990234375, 7.8639033203125, 7.91662353515625, 7.9870107421875, 8.05925341796875], "mean": 7.9344580078125, "median": 7.91662353515625, "std": 0.07942973943119347, "min": 7.8454990234375, "max": 8.05925341796875, "p25": 7.8639033203125, "p75": 7.9870107421875, "p90": 8.03035634765625, "p95": 8.0448048828125, "p99": 8.0563637109375, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.039021312713623045, 0.039190624237060545, 0.03864041519165039, 0.03827344131469727, 0.03864451217651367], "mean": 0.03875406112670898, "median": 0.03864451217651367, "std": 0.00032184547061294176, "min": 0.03827344131469727, "max": 0.039190624237060545, "p25": 0.03864041519165039, "p75": 0.039021312713623045, "p90": 0.039122899627685545, "p95": 0.039156761932373045, "p99": 0.039183851776123046, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [12.746161805802515, 12.716331308613563, 12.63164776699544, 12.520328722208752, 12.408097228589687], "mean": 12.604513366441992, "median": 12.63164776699544, "std": 0.12567431065388374, "min": 12.408097228589687, "max": 12.746161805802515, "p25": 12.520328722208752, "p75": 12.716331308613563, "p90": 12.734229606926935, "p95": 12.740195706364725, "p99": 12.744968585914958, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.07845499023437501, 0.078639033203125, 0.0791662353515625, 0.079870107421875, 0.0805925341796875], "mean": 0.07934458007812499, "median": 0.0791662353515625, "std": 0.0007942973943119339, "min": 0.07845499023437501, "max": 0.0805925341796875, "p25": 0.078639033203125, "p75": 0.079870107421875, "p90": 0.08030356347656249, "p95": 0.080448048828125, "p99": 0.08056363710937499, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 43.641176470588235, "gpu_utilization_max": 94, "gpu_utilization_min": 0, "gpu_memory_used_mean": 13629.505882352942, "gpu_memory_used_max": 13777, "gpu_memory_used_min": 13415, "sample_count": 170, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "eager_sdpa_default",
      "metadata": {
        "timestamp": "2025-09-24T16:45:48.648436",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "eager", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "eager", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": null, "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": null, "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.417547119140625, 3.41592578125, 3.4152060546875, 3.41432763671875, 3.413631591796875], "mean": 3.41532763671875, "median": 3.4152060546875, "std": 0.0013546386727549295, "min": 3.413631591796875, "max": 3.417547119140625, "p25": 3.41432763671875, "p75": 3.41592578125, "p90": 3.416898583984375, "p95": 3.4172228515625, "p99": 3.417482265625, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.03513091278076172, 0.03474332809448242, 0.03460009765625, 0.03464044952392578, 0.03464387130737305], "mean": 0.03475173187255859, "median": 0.03464387130737305, "std": 0.00019537624360530697, "min": 0.03460009765625, "max": 0.03513091278076172, "p25": 0.03464044952392578, "p75": 0.03474332809448242, "p90": 0.03497587890625, "p95": 0.035053395843505857, "p99": 0.035115409393310544, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [29.2607523799543, 29.274640728115205, 29.28081011766368, 29.28834331086702, 29.294315250744962], "mean": 29.279772357469035, "median": 29.28081011766368, "std": 0.011611434861317509, "min": 29.2607523799543, "max": 29.294315250744962, "p25": 29.274640728115205, "p75": 29.28834331086702, "p90": 29.291926474793787, "p95": 29.293120862769374, "p99": 29.294076373149846, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.03417547119140625, 0.034159257812499996, 0.034152060546875, 0.034143276367187496, 0.03413631591796875], "mean": 0.0341532763671875, "median": 0.034152060546875, "std": 1.3546386727550617e-05, "min": 0.03413631591796875, "max": 0.03417547119140625, "p25": 0.034143276367187496, "p75": 0.034159257812499996, "p90": 0.03416898583984375, "p95": 0.034172228515625, "p99": 0.03417482265625, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 96.79069767441861, "gpu_utilization_max": 98, "gpu_utilization_min": 94, "gpu_memory_used_mean": 13849, "gpu_memory_used_max": 13849, "gpu_memory_used_min": 13849, "sample_count": 86, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "eager_sdpa_math",
      "metadata": {
        "timestamp": "2025-09-24T16:46:21.240956",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "eager", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "eager", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": null, "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": "math", "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.552343994140625, 3.546267822265625, 3.5585224609375, 3.554125, 3.563354736328125], "mean": 3.5549228027343753, "median": 3.554125, "std": 0.0057662895034736586, "min": 3.546267822265625, "max": 3.563354736328125, "p25": 3.552343994140625, "p75": 3.5585224609375, "p90": 3.561421826171875, "p95": 3.56238828125, "p99": 3.5631614453125002, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.039389888763427736, 0.040807903289794924, 0.038525600433349606, 0.03848828887939453, 0.040292385101318356], "mean": 0.03950081329345703, "median": 0.039389888763427736, "std": 0.0009299053982509372, "min": 0.03848828887939453, "max": 0.040807903289794924, "p25": 0.038525600433349606, "p75": 0.040292385101318356, "p90": 0.0406016960144043, "p95": 0.04070479965209961, "p99": 0.04078728256225586, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [28.150426919505517, 28.198659833907413, 28.10155088178221, 28.136320472690183, 28.063442289510995], "mean": 28.130080079479264, "median": 28.136320472690183, "std": 0.045630317327406016, "min": 28.063442289510995, "max": 28.198659833907413, "p25": 28.10155088178221, "p75": 28.150426919505517, "p90": 28.179366668146656, "p95": 28.189013251027035, "p99": 28.19673051733134, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.03552343994140625, 0.03546267822265625, 0.035585224609375, 0.035541249999999996, 0.035633547363281254], "mean": 0.03554922802734375, "median": 0.035541249999999996, "std": 5.766289503473765e-05, "min": 0.03546267822265625, "max": 0.035633547363281254, "p25": 0.03552343994140625, "p75": 0.035585224609375, "p90": 0.035614218261718754, "p95": 0.0356238828125, "p99": 0.035631614453125, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 95.62222222222222, "gpu_utilization_max": 97, "gpu_utilization_min": 87, "gpu_memory_used_mean": 13849, "gpu_memory_used_max": 13849, "gpu_memory_used_min": 13849, "sample_count": 90, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "eager_sdpa_flash_attention",
      "metadata": {
        "timestamp": "2025-09-24T16:46:54.980169",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "eager", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "eager", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": null, "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": "flash_attention", "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.41668994140625, 3.415956298828125, 3.413249267578125, 3.4136708984375, 3.417302978515625], "mean": 3.415373876953125, "median": 3.415956298828125, "std": 0.001625224571849383, "min": 3.413249267578125, "max": 3.417302978515625, "p25": 3.4136708984375, "p75": 3.41668994140625, "p90": 3.417057763671875, "p95": 3.41718037109375, "p99": 3.4172784570312498, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.035528350830078125, 0.03476684951782227, 0.034637504577636716, 0.03473231887817383, 0.03469190216064453], "mean": 0.03487138519287109, "median": 0.03473231887817383, "std": 0.00033129602978360166, "min": 0.034637504577636716, "max": 0.035528350830078125, "p25": 0.03469190216064453, "p75": 0.03476684951782227, "p90": 0.03522375030517578, "p95": 0.03537605056762695, "p99": 0.03549789077758789, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [29.268093305195187, 29.274379193406517, 29.297596559935723, 29.293977941977897, 29.26284284088765], "mean": 29.279377968280595, "median": 29.274379193406517, "std": 0.013934260819663154, "min": 29.26284284088765, "max": 29.297596559935723, "p25": 29.268093305195187, "p75": 29.293977941977897, "p90": 29.29614911275259, "p95": 29.296872836344157, "p99": 29.29745181521741, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.0341668994140625, 0.034159562988281246, 0.03413249267578125, 0.034136708984375, 0.034173029785156246], "mean": 0.03415373876953125, "median": 0.034159562988281246, "std": 1.6252245718492983e-05, "min": 0.03413249267578125, "max": 0.034173029785156246, "p25": 0.034136708984375, "p75": 0.0341668994140625, "p90": 0.03417057763671875, "p95": 0.0341718037109375, "p99": 0.034172784570312496, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 96.75581395348837, "gpu_utilization_max": 98, "gpu_utilization_min": 94, "gpu_memory_used_mean": 13849, "gpu_memory_used_max": 13849, "gpu_memory_used_min": 13849, "sample_count": 86, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "eager_sdpa_efficient_attention",
      "metadata": {
        "timestamp": "2025-09-24T16:47:27.520398",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "eager", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "eager", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": null, "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": "efficient_attention", "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.41412109375, 3.41260009765625, 3.41233154296875, 3.41347509765625, 3.41185400390625], "mean": 3.4128763671874998, "median": 3.41260009765625, "std": 0.0008156233630401147, "min": 3.41185400390625, "max": 3.41412109375, "p25": 3.41233154296875, "p75": 3.41347509765625, "p90": 3.4138626953125, "p95": 3.4139918945312497, "p99": 3.41409525390625, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.03537728118896484, 0.034829376220703125, 0.03476316833496094, 0.03473088073730469, 0.034761215209960936], "mean": 0.0348923843383789, "median": 0.03476316833496094, "std": 0.00024457330498252886, "min": 0.03473088073730469, "max": 0.03537728118896484, "p25": 0.034761215209960936, "p75": 0.034829376220703125, "p90": 0.035158119201660155, "p95": 0.035267700195312494, "p99": 0.035355364990234374, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [29.290115158206667, 29.30316976450868, 29.30547595999402, 29.295658277589812, 29.30957769163319], "mean": 29.30079937038647, "median": 29.30316976450868, "std": 0.007001876999913273, "min": 29.290115158206667, "max": 29.30957769163319, "p25": 29.295658277589812, "p75": 29.30547595999402, "p90": 29.30793699897752, "p95": 29.308757345305356, "p99": 29.309413622367625, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.0341412109375, 0.0341260009765625, 0.0341233154296875, 0.034134750976562496, 0.0341185400390625], "mean": 0.034128763671875, "median": 0.0341260009765625, "std": 8.15623363040056e-06, "min": 0.0341185400390625, "max": 0.0341412109375, "p25": 0.0341233154296875, "p75": 0.034134750976562496, "p90": 0.034138626953125, "p95": 0.0341399189453125, "p99": 0.0341409525390625, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 96.73255813953489, "gpu_utilization_max": 98, "gpu_utilization_min": 93, "gpu_memory_used_mean": 13849, "gpu_memory_used_max": 13849, "gpu_memory_used_min": 13849, "sample_count": 86, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "compiled_compile_max-autotune_sdpa_default",
      "metadata": {
        "timestamp": "2025-09-24T16:48:00.062731",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "compiled", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "compiled", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": "max-autotune", "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": null, "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.70151513671875, 3.70281005859375, 3.717792236328125, 3.696918701171875, 3.704110107421875], "mean": 3.704629248046875, "median": 3.70281005859375, "std": 0.007014177645636534, "min": 3.696918701171875, "max": 3.717792236328125, "p25": 3.70151513671875, "p75": 3.704110107421875, "p90": 3.712319384765625, "p95": 3.715055810546875, "p99": 3.717244951171875, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.03803078460693359, 0.03749631881713867, 0.03756547164916992, 0.03767836761474609, 0.037459712982177734], "mean": 0.03764613113403321, "median": 0.03756547164916992, "std": 0.00020625173697984167, "min": 0.037459712982177734, "max": 0.03803078460693359, "p25": 0.03749631881713867, "p75": 0.03767836761474609, "p90": 0.03788981781005859, "p95": 0.03796030120849609, "p99": 0.03801668792724609, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [27.015964086708053, 27.006516245118423, 26.89768379815784, 27.049553447929842, 26.99703764195113], "mean": 26.993351043973057, "median": 27.006516245118423, "std": 0.05100822553270322, "min": 26.89768379815784, "max": 27.049553447929842, "p25": 26.99703764195113, "p75": 27.015964086708053, "p90": 27.036117703441125, "p95": 27.042835575685483, "p99": 27.04820987348097, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.0370151513671875, 0.0370281005859375, 0.03717792236328125, 0.03696918701171875, 0.03704110107421875], "mean": 0.03704629248046874, "median": 0.0370281005859375, "std": 7.014177645636466e-05, "min": 0.03696918701171875, "max": 0.03717792236328125, "p25": 0.0370151513671875, "p75": 0.03704110107421875, "p90": 0.03712319384765625, "p95": 0.037150558105468745, "p99": 0.03717244951171875, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 90.19354838709677, "gpu_utilization_max": 93, "gpu_utilization_min": 86, "gpu_memory_used_mean": 13845, "gpu_memory_used_max": 13845, "gpu_memory_used_min": 13845, "sample_count": 93, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "compiled_compile_max-autotune_sdpa_math",
      "metadata": {
        "timestamp": "2025-09-24T16:48:34.955405",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "compiled", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "compiled", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": "max-autotune", "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": "math", "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.96208837890625, 3.920692138671875, 3.90959033203125, 3.9529482421875, 3.932615966796875], "mean": 3.93558701171875, "median": 3.932615966796875, "std": 0.0195460814076916, "min": 3.90959033203125, "max": 3.96208837890625, "p25": 3.920692138671875, "p75": 3.9529482421875, "p90": 3.95843232421875, "p95": 3.9602603515625, "p99": 3.9617227734375, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.04159897613525391, 0.04110636901855469, 0.040704639434814456, 0.04099983978271484, 0.04205161666870117], "mean": 0.04129228820800781, "median": 0.04110636901855469, "std": 0.0004766676626406187, "min": 0.040704639434814456, "max": 0.04205161666870117, "p25": 0.04099983978271484, "p75": 0.04159897613525391, "p90": 0.04187056045532227, "p95": 0.041961088562011715, "p99": 0.04203351104736328, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [25.239214887883293, 25.505700642406154, 25.57812750371838, 25.2975738292646, 25.42836647267397], "mean": 25.40979666718928, "median": 25.42836647267397, "std": 0.126150536620175, "min": 25.239214887883293, "max": 25.57812750371838, "p25": 25.2975738292646, "p75": 25.505700642406154, "p90": 25.54915675919349, "p95": 25.563642131455936, "p99": 25.575230429265893, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.0396208837890625, 0.03920692138671875, 0.0390959033203125, 0.039529482421875, 0.03932615966796875], "mean": 0.0393558701171875, "median": 0.03932615966796875, "std": 0.00019546081407691564, "min": 0.0390959033203125, "max": 0.0396208837890625, "p25": 0.03920692138671875, "p75": 0.039529482421875, "p90": 0.0395843232421875, "p95": 0.039602603515625, "p99": 0.039617227734374996, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 88.43434343434343, "gpu_utilization_max": 92, "gpu_utilization_min": 83, "gpu_memory_used_mean": 13845, "gpu_memory_used_max": 13845, "gpu_memory_used_min": 13845, "sample_count": 99, "gpu_monitoring_status": "success"}
    },
    {
      "scenario_name": "compiled_compile_max-autotune_sdpa_efficient_attention",
      "metadata": {
        "timestamp": "2025-09-24T16:49:16.535348",
        "commit_id": "7258ea44bc0c0a425a468f66f8559d1de8c4126d",
        "hardware_info": {"gpu_name": "NVIDIA A10G", "gpu_memory_total_mb": 23028, "cpu_count": 16, "memory_total_mb": 63607, "python_version": "3.10.12", "torch_version": "2.8.0+cu126", "cuda_version": "12.6"},
        "config": {"name": "compiled", "model_id": "meta-llama/Llama-2-7b-hf", "variant": "compiled", "warmup_iterations": 3, "measurement_iterations": 5, "num_tokens_to_generate": 100, "device": "cuda", "torch_dtype": "float16", "compile_mode": "max-autotune", "compile_options": {}, "use_cache": true, "batch_size": 1, "sequence_length": null, "attn_implementation": "sdpa", "sdpa_backend": "efficient_attention", "custom_params": {}}
      },
      "measurements": {
        "latency_seconds": {"name": "latency_seconds", "measurements": [3.70085888671875, 3.708667724609375, 3.7039189453125, 3.7002392578125, 3.69233154296875], "mean": 3.7012032714843754, "median": 3.70085888671875, "std": 0.0053450619243819875, "min": 3.69233154296875, "max": 3.708667724609375, "p25": 3.7002392578125, "p75": 3.7039189453125, "p90": 3.706768212890625, "p95": 3.70771796875, "p99": 3.7084777734375, "unit": "seconds"},
        "time_to_first_token_seconds": {"name": "time_to_first_token_seconds", "measurements": [0.038309761047363285, 0.03758179092407227, 0.03844585418701172, 0.03750716781616211, 0.037865886688232424], "mean": 0.03794209213256836, "median": 0.037865886688232424, "std": 0.0003778200709319446, "min": 0.03750716781616211, "max": 0.03844585418701172, "p25": 0.03758179092407227, "p75": 0.038309761047363285, "p90": 0.03839141693115235, "p95": 0.03841863555908203, "p99": 0.03844041046142578, "unit": "seconds"},
        "tokens_per_second": {"name": "tokens_per_second", "measurements": [27.020754657484886, 26.963860724549747, 26.998430979855847, 27.025279456960792, 27.083158388208247], "mean": 27.018296841411903, "median": 27.020754657484886, "std": 0.0390378311563571, "min": 26.963860724549747, "max": 27.083158388208247, "p25": 26.998430979855847, "p75": 27.025279456960792, "p90": 27.060006815709265, "p95": 27.071582601958756, "p99": 27.080843230958347, "unit": "tokens/sec"},
        "time_per_output_token_seconds": {"name": "time_per_output_token_seconds", "measurements": [0.037008588867187504, 0.03708667724609375, 0.037039189453125, 0.037002392578125, 0.0369233154296875], "mean": 0.037012032714843754, "median": 0.037008588867187504, "std": 5.3450619243820804e-05, "min": 0.0369233154296875, "max": 0.03708667724609375, "p25": 0.037002392578125, "p75": 0.037039189453125, "p90": 0.03706768212890625, "p95": 0.037077179687499996, "p99": 0.037084777734375, "unit": "seconds/token"}
      },
      "gpu_metrics": {"gpu_utilization_mean": 90.2258064516129, "gpu_utilization_max": 93, "gpu_utilization_min": 86, "gpu_memory_used_mean": 13845, "gpu_memory_used_max": 13845, "gpu_memory_used_min": 13845, "sample_count": 93, "gpu_monitoring_status": "success"}
    }
  ]
}
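For anyone consuming these uploads, a minimal sketch of how a file like the one above could be loaded and summarized with the Python standard library. The key names (model_name, benchmark_scenarios, metadata.config, measurements, gpu_metrics) are taken from the JSON shown here; the file path and the script itself are illustrative and not part of the benchmark tooling.

import json

# Illustrative path; point this at the uploaded benchmark_results JSON.
path = "Llama-2-7b-hf_benchmark_20250924_164948.json"

with open(path) as f:
    results = json.load(f)

print(f"model: {results['model_name']}")
for scenario in results["benchmark_scenarios"]:
    cfg = scenario["metadata"]["config"]
    m = scenario["measurements"]
    # One summary line per scenario: attention setup, compile mode, and mean metrics.
    print(
        f"{scenario['scenario_name']:<58} "
        f"attn={cfg['attn_implementation']:<6} "
        f"sdpa_backend={cfg['sdpa_backend'] or '-':<20} "
        f"compile={cfg['compile_mode'] or '-':<13} "
        f"latency={m['latency_seconds']['mean']:.3f}s "
        f"ttft={m['time_to_first_token_seconds']['mean'] * 1000:.1f}ms "
        f"tok/s={m['tokens_per_second']['mean']:.2f}"
    )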