davanstrien (HF Staff) committed
Commit a875142 · Parent: 0f7b9e2

Enhance dataset card creation with filtering statistics and add option to skip long prompts
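The new behavior is on by default and can be disabled from the command line. A hypothetical invocation sketch (the flag names come from this commit; the `uv run` entry point mirrors the reproduction command in the script's own dataset card, and all other arguments are omitted):

```bash
# Long prompts are skipped by default when a length limit applies:
uv run generate-responses.py ... --max-model-len 8192

# Opt out and fail on prompts that exceed the limit instead:
uv run generate-responses.py ... --no-skip-long-prompts
```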

Files changed (1): generate-responses.py (+88 -11)
generate-responses.py

````diff
@@ -87,8 +87,25 @@ def create_dataset_card(
     tensor_parallel_size: int,
     num_examples: int,
     generation_time: str,
+    num_skipped: int = 0,
+    max_model_len_used: Optional[int] = None,
 ) -> str:
     """Create a comprehensive dataset card documenting the generation process."""
+    filtering_section = ""
+    if num_skipped > 0:
+        skip_percentage = (num_skipped / num_examples) * 100
+        processed = num_examples - num_skipped
+        filtering_section = f"""
+
+### Filtering Statistics
+
+- **Total Examples**: {num_examples:,}
+- **Processed**: {processed:,} ({100 - skip_percentage:.1f}%)
+- **Skipped (too long)**: {num_skipped:,} ({skip_percentage:.1f}%)
+- **Max Model Length Used**: {max_model_len_used:,} tokens
+
+Note: Prompts exceeding the maximum model length were skipped and have empty responses."""
+
     return f"""---
 viewer: false
 tags:
@@ -107,7 +124,7 @@ This dataset contains generated responses for prompts from [{source_dataset}](ht
 - **Messages Column**: `{messages_column}`
 - **Model**: [{model_id}](https://huggingface.co/{model_id})
 - **Number of Examples**: {num_examples:,}
-- **Generation Date**: {generation_time}
+- **Generation Date**: {generation_time}{filtering_section}
 
 ### Sampling Parameters
 
@@ -143,7 +160,7 @@ uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-respons
     --temperature {sampling_params.temperature} \\
     --top-p {sampling_params.top_p} \\
     --top-k {sampling_params.top_k} \\
-    --max-tokens {sampling_params.max_tokens}
+    --max-tokens {sampling_params.max_tokens}{f' \\\\\\n    --max-model-len {max_model_len_used}' if max_model_len_used else ''}
 ```
 """
 
@@ -163,6 +180,7 @@ def main(
     gpu_memory_utilization: float = 0.90,
     max_model_len: Optional[int] = None,
     tensor_parallel_size: Optional[int] = None,
+    skip_long_prompts: bool = True,
     hf_token: Optional[str] = None,
 ):
     """
@@ -183,6 +201,7 @@ def main(
         gpu_memory_utilization: GPU memory utilization factor
         max_model_len: Maximum model context length (None uses model default)
         tensor_parallel_size: Number of GPUs to use (auto-detect if None)
+        skip_long_prompts: Skip prompts exceeding max_model_len instead of failing
        hf_token: Hugging Face authentication token
     """
     generation_start_time = datetime.now().isoformat()
@@ -254,29 +273,72 @@ def main(
         )
         sys.exit(1)
 
+    # Get effective max length for filtering
+    if max_model_len is not None:
+        effective_max_len = max_model_len
+    else:
+        # Get model's default max length
+        effective_max_len = llm.llm_engine.model_config.max_model_len
+    logger.info(f"Using effective max model length: {effective_max_len}")
+
     # Process messages and apply chat template
     logger.info("Applying chat template to messages...")
-    prompts = []
-    for example in tqdm(dataset, desc="Processing messages"):
+    all_prompts = []
+    valid_prompts = []
+    valid_indices = []
+    skipped_info = []
+
+    for i, example in enumerate(tqdm(dataset, desc="Processing messages")):
         messages = example[messages_column]
         # Apply chat template
         prompt = tokenizer.apply_chat_template(
             messages, tokenize=False, add_generation_prompt=True
         )
-        prompts.append(prompt)
+        all_prompts.append(prompt)
+
+        # Count tokens if filtering is enabled
+        if skip_long_prompts:
+            tokens = tokenizer.encode(prompt)
+            if len(tokens) <= effective_max_len:
+                valid_prompts.append(prompt)
+                valid_indices.append(i)
+            else:
+                skipped_info.append((i, len(tokens)))
+        else:
+            valid_prompts.append(prompt)
+            valid_indices.append(i)
+
+    # Log filtering results
+    if skip_long_prompts and skipped_info:
+        logger.warning(f"Skipped {len(skipped_info)} prompts that exceed max_model_len ({effective_max_len} tokens)")
+        logger.info("Skipped prompt details (first 10):")
+        for idx, (prompt_idx, token_count) in enumerate(skipped_info[:10]):
+            logger.info(f"  - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})")
+        if len(skipped_info) > 10:
+            logger.info(f"  ... and {len(skipped_info) - 10} more")
+
+        skip_percentage = (len(skipped_info) / total_examples) * 100
+        if skip_percentage > 10:
+            logger.warning(f"WARNING: {skip_percentage:.1f}% of prompts were skipped!")
+
+    if not valid_prompts:
+        logger.error("No valid prompts to process after filtering!")
+        sys.exit(1)
 
     # Generate responses - vLLM handles batching internally
-    logger.info(f"Starting generation for {len(prompts):,} prompts...")
+    logger.info(f"Starting generation for {len(valid_prompts):,} valid prompts...")
     logger.info("vLLM will handle batching and scheduling automatically")
 
-    outputs = llm.generate(prompts, sampling_params)
+    outputs = llm.generate(valid_prompts, sampling_params)
 
-    # Extract generated text
+    # Extract generated text and create full response list
     logger.info("Extracting generated responses...")
-    responses = []
-    for output in outputs:
+    responses = [""] * total_examples  # Initialize with empty strings
+
+    for idx, output in enumerate(outputs):
+        original_idx = valid_indices[idx]
         response = output.outputs[0].text.strip()
-        responses.append(response)
+        responses[original_idx] = response
 
     # Add responses to dataset
     logger.info("Adding responses to dataset...")
@@ -292,6 +354,8 @@ def main(
         tensor_parallel_size=tensor_parallel_size,
         num_examples=total_examples,
         generation_time=generation_start_time,
+        num_skipped=len(skipped_info) if skip_long_prompts else 0,
+        max_model_len_used=effective_max_len if skip_long_prompts else None,
     )
 
     # Push dataset to hub
@@ -416,6 +480,18 @@ Examples:
         type=str,
         help="Hugging Face token (can also use HF_TOKEN env var)",
     )
+    parser.add_argument(
+        "--skip-long-prompts",
+        action="store_true",
+        default=True,
+        help="Skip prompts that exceed max_model_len instead of failing (default: True)",
+    )
+    parser.add_argument(
+        "--no-skip-long-prompts",
+        dest="skip_long_prompts",
+        action="store_false",
+        help="Fail on prompts that exceed max_model_len",
+    )
 
     args = parser.parse_args()
 
@@ -434,6 +510,7 @@ Examples:
             gpu_memory_utilization=args.gpu_memory_utilization,
             max_model_len=args.max_model_len,
             tensor_parallel_size=args.tensor_parallel_size,
+            skip_long_prompts=args.skip_long_prompts,
             hf_token=args.hf_token,
         )
     else:
````
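For readers skimming the diff above: the key invariant is that the response column keeps one entry per input row, with skipped rows left as empty strings. A minimal sketch of that skip-and-remap pattern (the `remap` helper is hypothetical, written only to illustrate the indexing):

```python
# Hypothetical helper illustrating the commit's index remapping: outputs for
# the filtered (valid) prompts are written back at their original row indices,
# while skipped rows keep "" as their response.
def remap(valid_outputs, valid_indices, total_examples):
    responses = [""] * total_examples
    for out_pos, original_idx in enumerate(valid_indices):
        responses[original_idx] = valid_outputs[out_pos]
    return responses

# Rows 0 and 2 were generated; row 1 was skipped as too long.
assert remap(["a", "b"], [0, 2], 3) == ["a", "", "b"]
```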
 
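One design note on the new flags: since `--skip-long-prompts` is declared with `action="store_true"` and `default=True`, passing it explicitly is a no-op; only `--no-skip-long-prompts` changes behavior. On Python 3.9+, `argparse.BooleanOptionalAction` expresses the same flag pair in a single declaration; a sketch for comparison, not part of the commit:

```python
import argparse

# BooleanOptionalAction (Python 3.9+) generates both --skip-long-prompts and
# --no-skip-long-prompts from one declaration.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--skip-long-prompts",
    action=argparse.BooleanOptionalAction,
    default=True,
    help="Skip prompts that exceed max_model_len instead of failing",
)

print(parser.parse_args([]).skip_long_prompts)                          # True
print(parser.parse_args(["--no-skip-long-prompts"]).skip_long_prompts)  # False
```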