#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "vllm>=0.6.6",
#     "transformers>=4.53.0",
#     "torch",
#     "datasets",
#     "huggingface-hub[hf_transfer]",
# ]
# ///

"""
Classify text columns in Hugging Face datasets using vLLM with structured outputs.

This script provides efficient GPU-based classification with guaranteed valid outputs,
optimized for running on HF Jobs.

Example:
    uv run classify-dataset.py \\
        --input-dataset stanfordnlp/imdb \\
        --column text \\
        --labels "positive,negative" \\
        --output-dataset user/imdb-classified

HF Jobs example:
    hf jobs uv run --flavor a10 classify-dataset.py \\
        --input-dataset user/emails \\
        --column content \\
        --labels "spam,ham" \\
        --output-dataset user/emails-classified \\
        --enable-reasoning
"""

import argparse
import json
import logging
import os
import sys
from typing import List

import torch
from datasets import load_dataset
from huggingface_hub import HfApi, get_token
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

# Default model - SmolLM3 offers a good balance of speed and quality
DEFAULT_MODEL = "HuggingFaceTB/SmolLM3-3B"

def parse_label_descriptions(desc_string: str) -> dict:
    """Parse label descriptions from CLI format 'label1:desc1,label2:desc2'."""
    if not desc_string:
        return {}
    
    descriptions = {}
    # Split by comma, but be careful about commas in descriptions
    parts = desc_string.split(',')
    
    current_label = None
    current_desc_parts = []
    
    for part in parts:
        if ':' in part and not current_label:
            # New label:description pair
            label, desc = part.split(':', 1)
            current_label = label.strip()
            current_desc_parts = [desc.strip()]
        elif ':' in part and current_label:
            # Save previous label and start new one
            descriptions[current_label] = ','.join(current_desc_parts)
            label, desc = part.split(':', 1)
            current_label = label.strip()
            current_desc_parts = [desc.strip()]
        else:
            # Continuation of previous description (had comma in it)
            current_desc_parts.append(part.strip())
    
    # Don't forget the last one
    if current_label:
        descriptions[current_label] = ','.join(current_desc_parts)
    
    return descriptions
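
# Illustrative example (hand-traced; nothing here executes at import time):
#   parse_label_descriptions("bug:broken, not working,feature:request for new functionality")
#   -> {"bug": "broken,not working", "feature": "request for new functionality"}
# Commas inside a description are preserved, though the spaces around them are stripped on re-joining.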


def create_messages(text: str, labels: List[str], label_descriptions: dict | None = None, enable_reasoning: bool = False) -> List[dict]:
    """Create messages for chat template with optional label descriptions."""
    
    # Build the classification prompt
    if label_descriptions:
        # Format with descriptions
        categories_text = "Categories:\n"
        for label in labels:
            desc = label_descriptions.get(label, "")
            if desc:
                categories_text += f"- {label}: {desc}\n"
            else:
                categories_text += f"- {label}\n"
    else:
        # Simple format without descriptions
        categories_text = f"Categories: {', '.join(labels)}"
    
    if enable_reasoning:
        # Reasoning mode: allow thinking and request JSON output
        user_content = f"""Classify this text into one of these categories:

{categories_text}

Text: {text}

Think through your classification step by step, then provide your final answer in this JSON format:
{{"label": "your_chosen_label"}}"""
        
        system_content = "You are a helpful classification assistant that thinks step by step."
    else:
        # Structured output mode: fast classification
        if label_descriptions:
            user_content = f"Classify this text into one of these categories:\n\n{categories_text}\nText: {text}\n\nCategory:"
        else:
            user_content = f"Classify this text as one of: {', '.join(labels)}\n\nText: {text}\n\nLabel:"
        
        system_content = "You are a helpful classification assistant. /no_think"
    
    return [
        {"role": "system", "content": system_content},
        {"role": "user", "content": user_content}
    ]
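
# Illustrative example (hand-traced, default fast mode: no descriptions, no reasoning):
#   create_messages("Great movie!", ["positive", "negative"])
#   -> [{"role": "system", "content": "You are a helpful classification assistant. /no_think"},
#       {"role": "user", "content": "Classify this text as one of: positive, negative\n\nText: Great movie!\n\nLabel:"}]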

# Minimum text length for valid classification
MIN_TEXT_LENGTH = 3

# Maximum text length (in characters) to avoid context overflow
MAX_TEXT_LENGTH = 4000


def parse_reasoning_output(output: str, valid_labels: List[str]) -> tuple[str | None, str, bool]:
    """Parse reasoning output to extract label from JSON after </think> tag.
    
    Returns:
        tuple: (label or None, full reasoning text, parsing_success)
    """
    
    # Find the </think> tag
    think_end = output.find("</think>")
    
    if think_end != -1:
        # Extract everything after </think>
        json_part = output[think_end + len("</think>"):].strip()
        reasoning = output[:think_end + len("</think>")]
    else:
        # No think tags, look for JSON in the output
        # Try to find JSON by looking for {
        json_start = output.find("{")
        if json_start != -1:
            json_part = output[json_start:].strip()
            reasoning = output[:json_start].strip() if json_start > 0 else ""
        else:
            json_part = output
            reasoning = output
    
    # Try to parse JSON
    try:
        # Find the first complete JSON object
        if "{" in json_part:
            # Extract just the JSON object
            json_str = json_part[json_part.find("{"):]
            # Find the matching closing brace
            brace_count = 0
            end_pos = 0
            for i, char in enumerate(json_str):
                if char == "{":
                    brace_count += 1
                elif char == "}":
                    brace_count -= 1
                    if brace_count == 0:
                        end_pos = i + 1
                        break
            
            if end_pos > 0:
                json_str = json_str[:end_pos]
                data = json.loads(json_str)
                label = data.get("label", "")
                
                # Validate label
                if label in valid_labels:
                    return label, output, True
                else:
                    logger.warning(f"Parsed label '{label}' not in valid labels: {valid_labels}")
                    return None, output, False
            else:
                logger.warning("Could not find complete JSON object")
                return None, output, False
        else:
            logger.warning("No JSON found in output")
            return None, output, False
            
    except json.JSONDecodeError as e:
        logger.warning(f"JSON parsing error: {e}")
        return None, output, False
    except Exception as e:
        logger.warning(f"Unexpected error parsing output: {e}")
        return None, output, False
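
# Illustrative, hand-traced example with an assumed model output (nothing here executes at import time):
#   parse_reasoning_output('<think>Sounds upbeat.</think> {"label": "positive"}', ["positive", "negative"])
#   -> ("positive", '<think>Sounds upbeat.</think> {"label": "positive"}', True)
# If no complete JSON object or no recognised label is found, the label is None and the success flag is False.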

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Classify text in HuggingFace datasets using vLLM with structured outputs",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )

    # Required arguments
    parser.add_argument(
        "--input-dataset",
        type=str,
        required=True,
        help="Input dataset ID on Hugging Face Hub",
    )
    parser.add_argument(
        "--column", type=str, required=True, help="Name of the text column to classify"
    )
    parser.add_argument(
        "--labels",
        type=str,
        required=True,
        help="Comma-separated list of classification labels (e.g., 'positive,negative')",
    )
    parser.add_argument(
        "--output-dataset",
        type=str,
        required=True,
        help="Output dataset ID on Hugging Face Hub",
    )

    # Optional arguments
    parser.add_argument(
        "--model",
        type=str,
        default=DEFAULT_MODEL,
        help=f"Model to use for classification (default: {DEFAULT_MODEL})",
    )
    # Note: there is no --batch-size argument; vLLM batches prompts internally
    parser.add_argument(
        "--label-descriptions",
        type=str,
        default=None,
        help="Descriptions for each label in format 'label1:description1,label2:description2'",
    )
    parser.add_argument(
        "--enable-reasoning",
        action="store_true",
        help="Enable reasoning mode with thinking traces (disables structured outputs)",
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        default=None,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--hf-token",
        type=str,
        default=None,
        help="Hugging Face API token (default: auto-detect from HF_TOKEN env var or huggingface-cli login)",
    )
    parser.add_argument(
        "--split",
        type=str,
        default="train",
        help="Dataset split to process (default: train)",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.1,
        help="Temperature for generation (default: 0.1)",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=100,
        help="Maximum tokens to generate (default: 100, automatically increased 20x for reasoning mode)",
    )
    parser.add_argument(
        "--guided-backend",
        type=str,
        default="outlines",
        help="Guided decoding backend (default: outlines)",
    )
    parser.add_argument(
        "--shuffle",
        action="store_true",
        help="Shuffle dataset before selecting samples (useful with --max-samples for random sampling)",
    )
    parser.add_argument(
        "--shuffle-seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )

    return parser.parse_args()


def preprocess_text(text: str) -> str:
    """Preprocess text for classification."""
    if not text or not isinstance(text, str):
        return ""

    # Strip whitespace
    text = text.strip()

    # Truncate if too long
    if len(text) > MAX_TEXT_LENGTH:
        text = f"{text[:MAX_TEXT_LENGTH]}..."

    return text


def validate_text(text: str) -> bool:
    """Check if text is valid for classification."""
    return bool(text and len(text) >= MIN_TEXT_LENGTH)


def prepare_prompts(
    texts: List[str], labels: List[str], tokenizer: AutoTokenizer,
    label_descriptions: dict | None = None, enable_reasoning: bool = False
) -> tuple[List[str], List[int]]:
    """Prepare prompts using chat template for classification, filtering invalid texts."""
    prompts = []
    valid_indices = []

    for i, text in enumerate(texts):
        processed_text = preprocess_text(text)
        if validate_text(processed_text):
            # Create messages for chat template
            messages = create_messages(processed_text, labels, label_descriptions, enable_reasoning)
            
            # Apply chat template
            prompt = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
            prompts.append(prompt)
            valid_indices.append(i)

    return prompts, valid_indices
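
# Illustrative example (assumed inputs, hand-traced): with texts == ["", "Great movie!"] the empty
# string fails validate_text, so only one chat-formatted prompt is returned and valid_indices == [1];
# downstream code uses valid_indices to write results back onto the correct rows of the dataset.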


def main():
    args = parse_args()

    # Check authentication early
    logger.info("Checking authentication...")
    token = args.hf_token or (os.environ.get("HF_TOKEN") or get_token())

    if not token:
        logger.error("No authentication token found. Please either:")
        logger.error("1. Run 'huggingface-cli login'")
        logger.error("2. Set HF_TOKEN environment variable")
        logger.error("3. Pass --hf-token argument")
        sys.exit(1)

    # Validate token by checking who we are
    try:
        api = HfApi(token=token)
        user_info = api.whoami()
        logger.info(f"Authenticated as: {user_info['name']}")
    except Exception as e:
        logger.error(f"Authentication failed: {e}")
        logger.error("Please check your token is valid")
        sys.exit(1)

    # Check CUDA availability
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with GPU support or use HF Jobs.")
        sys.exit(1)

    logger.info(f"CUDA available. Using device: {torch.cuda.get_device_name(0)}")

    # Parse and validate labels
    labels = [label.strip() for label in args.labels.split(",")]
    if len(labels) < 2:
        logger.error("At least two labels are required for classification.")
        sys.exit(1)
    logger.info(f"Classification labels: {labels}")
    
    # Parse label descriptions if provided
    label_descriptions = None
    if args.label_descriptions:
        label_descriptions = parse_label_descriptions(args.label_descriptions)
        logger.info("Label descriptions provided:")
        for label, desc in label_descriptions.items():
            logger.info(f"  {label}: {desc}")

    # Load dataset
    logger.info(f"Loading dataset: {args.input_dataset}")
    try:
        dataset = load_dataset(args.input_dataset, split=args.split)
        logger.info(f"Loaded {len(dataset)} samples from split '{args.split}'")

        # Shuffle if requested
        if args.shuffle:
            logger.info(f"Shuffling dataset with seed {args.shuffle_seed}")
            dataset = dataset.shuffle(seed=args.shuffle_seed)

        # Limit samples if specified
        if args.max_samples:
            dataset = dataset.select(range(min(args.max_samples, len(dataset))))
            logger.info(f"Limited dataset to {len(dataset)} samples")
            if args.shuffle:
                logger.info("Note: Samples were randomly selected due to shuffling")
    except Exception as e:
        logger.error(f"Failed to load dataset: {e}")
        sys.exit(1)

    # Verify column exists
    if args.column not in dataset.column_names:
        logger.error(f"Column '{args.column}' not found in dataset.")
        logger.error(f"Available columns: {dataset.column_names}")
        sys.exit(1)

    # Extract texts
    texts = dataset[args.column]

    # Load tokenizer for chat template formatting
    logger.info(f"Loading tokenizer for {args.model}")
    try:
        tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
    except Exception as e:
        logger.error(f"Failed to load tokenizer: {e}")
        sys.exit(1)

    # Initialize vLLM
    logger.info(f"Initializing vLLM with model: {args.model}")
    logger.info(f"Using guided decoding backend: {args.guided_backend}")
    try:
        llm = LLM(
            model=args.model,
            trust_remote_code=True,
            dtype="auto",
            gpu_memory_utilization=0.95,
            guided_decoding_backend=args.guided_backend,
        )
    except Exception as e:
        logger.error(f"Failed to initialize vLLM: {e}")
        sys.exit(1)

    # Set up sampling parameters based on mode
    if args.enable_reasoning:
        # Reasoning mode: no guided decoding, much more tokens for thinking
        sampling_params = SamplingParams(
            temperature=args.temperature,
            max_tokens=args.max_tokens * 20,  # 20x more tokens for extensive reasoning
        )
        logger.info("Using reasoning mode - model will generate thinking traces with JSON output")
    else:
        # Structured output mode: guided decoding
        guided_params = GuidedDecodingParams(choice=labels)
        sampling_params = SamplingParams(
            guided_decoding=guided_params,
            temperature=args.temperature,
            max_tokens=args.max_tokens,
        )
        logger.info("Using structured output with guided_choice - outputs guaranteed to be valid labels")

    # Prepare all prompts
    logger.info("Preparing prompts for classification...")
    all_prompts, valid_indices = prepare_prompts(texts, labels, tokenizer, label_descriptions, args.enable_reasoning)

    if not all_prompts:
        logger.error("No valid texts found for classification.")
        sys.exit(1)

    logger.info(f"Prepared {len(all_prompts)} valid prompts out of {len(texts)} texts")

    # Let vLLM handle batching internally
    logger.info("Starting classification (vLLM will handle batching internally)...")

    try:
        # Generate all classifications at once - vLLM handles batching
        outputs = llm.generate(all_prompts, sampling_params)

        # Process outputs based on mode
        if args.enable_reasoning:
            # Reasoning mode: parse JSON and extract reasoning
            all_classifications = [None] * len(texts)
            all_reasoning = [None] * len(texts)
            all_parsing_success = [None] * len(texts)  # None = skipped text, True/False = parse result
            
            for idx, output in enumerate(outputs):
                original_idx = valid_indices[idx]
                generated_text = output.outputs[0].text.strip()
                
                # Parse the reasoning output
                label, reasoning, success = parse_reasoning_output(generated_text, labels)
                
                all_classifications[original_idx] = label
                all_reasoning[original_idx] = reasoning
                all_parsing_success[original_idx] = success
                
                # Log first few examples
                if idx < 3:
                    logger.info(f"\nExample {idx + 1} output:")
                    logger.info(f"Raw output: {generated_text[:200]}...")
                    logger.info(f"Parsed label: {label}")
                    logger.info(f"Parsing success: {success}")
            
            # Count parsing statistics
            parsing_success_count = sum(1 for s in all_parsing_success if s)
            parsing_fail_count = sum(1 for s in all_parsing_success if s is not None and not s)
            logger.info(f"\nParsing statistics:")
            logger.info(f"  Successful: {parsing_success_count}/{len(valid_indices)} ({parsing_success_count/len(valid_indices)*100:.1f}%)")
            logger.info(f"  Failed: {parsing_fail_count}/{len(valid_indices)} ({parsing_fail_count/len(valid_indices)*100:.1f}%)")
            
            valid_texts = parsing_success_count
        else:
            # Structured output mode: direct classification
            all_classifications = [None] * len(texts)
            for idx, output in enumerate(outputs):
                original_idx = valid_indices[idx]
                generated_text = output.outputs[0].text.strip()
                all_classifications[original_idx] = generated_text
            
            valid_texts = len(valid_indices)

        # Count statistics
        total_texts = len(texts)

    except Exception as e:
        logger.error(f"Classification failed: {e}")
        sys.exit(1)

    # Add columns to dataset
    dataset = dataset.add_column("classification", all_classifications)
    
    if args.enable_reasoning:
        dataset = dataset.add_column("reasoning", all_reasoning)
        dataset = dataset.add_column("parsing_success", all_parsing_success)

    # Calculate statistics
    none_count = total_texts - valid_texts
    if none_count > 0:
        logger.warning(
            f"{none_count} texts were too short or invalid for classification"
        )

    # Show classification distribution
    label_counts = {label: all_classifications.count(label) for label in labels}
    
    # Count None values separately
    none_classifications = all_classifications.count(None)
    
    logger.info("Classification distribution:")
    for label, count in label_counts.items():
        percentage = count / total_texts * 100 if total_texts > 0 else 0
        logger.info(f"  {label}: {count} ({percentage:.1f}%)")
    
    if none_classifications > 0:
        none_percentage = none_classifications / total_texts * 100
        if args.enable_reasoning:
            logger.info(f"  Failed to parse: {none_classifications} ({none_percentage:.1f}%)")
        else:
            logger.info(f"  Invalid/Skipped: {none_classifications} ({none_percentage:.1f}%)")

    # Log success rate
    success_rate = (valid_texts / total_texts * 100) if total_texts > 0 else 0
    logger.info(f"Classification success rate: {success_rate:.1f}%")

    # Save to Hub (token already validated at start)
    logger.info(f"Pushing dataset to Hub: {args.output_dataset}")
    try:
        dataset.push_to_hub(
            args.output_dataset,
            token=token,
            commit_message=f"Add classifications using {args.model} {'with reasoning' if args.enable_reasoning else 'with structured outputs'}",
        )
        logger.info(
            f"Successfully pushed to: https://huggingface.co/datasets/{args.output_dataset}"
        )
    except Exception as e:
        logger.error(f"Failed to push to Hub: {e}")
        sys.exit(1)


if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("Example commands:")
        print("\n# Simple classification:")
        print("uv run classify-dataset.py \\")
        print("  --input-dataset stanfordnlp/imdb \\")
        print("  --column text \\")
        print("  --labels 'positive,negative' \\")
        print("  --output-dataset user/imdb-classified")
        print("\n# With label descriptions:")
        print("uv run classify-dataset.py \\")
        print("  --input-dataset user/support-tickets \\")
        print("  --column content \\")
        print("  --labels 'bug,feature,question' \\")
        print("  --label-descriptions 'bug:something is broken or not working,feature:request for new functionality,question:asking for help or clarification' \\")
        print("  --output-dataset user/tickets-classified")
        print("\n# With reasoning mode (thinking + JSON output):")
        print("uv run classify-dataset.py \\")
        print("  --input-dataset stanfordnlp/imdb \\")
        print("  --column text \\")
        print("  --labels 'positive,negative,neutral' \\")
        print("  --enable-reasoning \\")
        print("  --output-dataset user/imdb-reasoned")
        print("\n# HF Jobs example:")
        print("hf jobs uv run \\")
        print("  --flavor l4x1 \\")
        print("  --image vllm/vllm-openai:latest \\")
        print("  classify-dataset.py \\")
        print("  --input-dataset stanfordnlp/imdb \\")
        print("  --column text \\")
        print("  --labels 'positive,negative' \\")
        print("  --output-dataset user/imdb-classified")
        sys.exit(0)

    main()