Commit eb5a4ea · ruff format
Parent: a875142

Files changed:
- classify-dataset.py (+46, -40)
- generate-responses.py (+15, -12)
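The diff below is purely mechanical reformatting. As a rough illustration, modeled on the logging.basicConfig change in classify-dataset.py, ruff format wraps calls that exceed its default 88-character line length, adds a trailing comma when an argument list is split one-per-line, and strips trailing whitespace (which is why several otherwise blank lines show up as changed):

import logging

# Before: a single call longer than the default line length.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# After ruff format: the call is wrapped; nothing about its behavior changes.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)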
classify-dataset.py
@@ -54,7 +54,9 @@ from toolz import concat, keymap, partition_all
 from tqdm.auto import tqdm
 from vllm import LLM

-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
 logger = logging.getLogger(__name__)


@@ -62,9 +64,11 @@ def check_gpu_availability():
     """Check if CUDA is available and log GPU information."""
     if not torch.cuda.is_available():
         logger.error("CUDA is not available. This script requires a GPU.")
-        logger.error("Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor.")
+        logger.error(
+            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
+        )
         sys.exit(1)
-
+
     gpu_name = torch.cuda.get_device_name(0)
     gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
     logger.info(f"GPU detected: {gpu_name} with {gpu_memory:.1f} GB memory")
@@ -75,25 +79,24 @@ def get_model_id2label(hub_model_id: str) -> Optional[dict[int, str]]:
     """Extract label mapping from model's config.json on Hugging Face Hub."""
     try:
         response = httpx.get(
-            hf_hub_url(hub_model_id, filename="config.json"),
-            follow_redirects=True
+            hf_hub_url(hub_model_id, filename="config.json"), follow_redirects=True
         )
         if response.status_code != 200:
             logger.warning(f"Could not fetch config.json for {hub_model_id}")
             return None
-
+
         data = response.json()
         id2label = data.get("id2label")
-
+
         if id2label is None:
             logger.info("No id2label mapping found in config.json")
             return None
-
+
         # Convert string keys to integers
         label_map = keymap(int, id2label)
         logger.info(f"Found label mapping: {label_map}")
         return label_map
-
+
     except Exception as e:
         logger.warning(f"Failed to parse config.json: {e}")
         return None
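Aside (not part of the commit): config.json stores id2label with string keys, which is why the script converts them with toolz's keymap before use. A minimal sketch with made-up labels:

from toolz import keymap

id2label = {"0": "negative", "1": "positive"}  # hypothetical values parsed from config.json
label_map = keymap(int, id2label)              # apply int() to every key
print(label_map)                               # {0: 'negative', 1: 'positive'}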
@@ -102,11 +105,11 @@ def get_model_id2label(hub_model_id: str) -> Optional[dict[int, str]]:
 def get_top_label(output, label_map: Optional[dict[int, str]] = None):
     """
     Extract the top predicted label and confidence score from vLLM output.
-
+
     Args:
         output: vLLM ClassificationRequestOutput
         label_map: Optional mapping from label indices to label names
-
+
     Returns:
         Tuple of (label, confidence_score)
     """
@@ -114,7 +117,7 @@ def get_top_label(output, label_map: Optional[dict[int, str]] = None):
     probs = F.softmax(logits, dim=0)
     top_idx = torch.argmax(probs).item()
     top_prob = probs[top_idx].item()
-
+
     # Use label name if mapping available, otherwise use index
     label = label_map.get(top_idx, str(top_idx)) if label_map else str(top_idx)
     return label, top_prob
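Aside (not part of the commit): the softmax/argmax step in get_top_label turns raw classification logits into a (label, probability) pair. A self-contained sketch using example logits and a hypothetical three-class label map:

import torch
import torch.nn.functional as F

logits = torch.tensor([1.2, -0.3, 2.7])                  # example logits for 3 classes
id2label = {0: "negative", 1: "neutral", 2: "positive"}  # hypothetical mapping

probs = F.softmax(logits, dim=0)
top_idx = torch.argmax(probs).item()
print(id2label.get(top_idx, str(top_idx)), probs[top_idx].item())  # "positive", ~0.79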
@@ -130,7 +133,7 @@ def main(
 ):
     """
     Main classification pipeline.
-
+
     Args:
         hub_model_id: Hugging Face model ID for classification
         src_dataset_hub_id: Input dataset on Hugging Face Hub
@@ -141,58 +144,62 @@ def main(
     """
     # GPU check
     check_gpu_availability()
-
+
     # Authentication
     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
     if HF_TOKEN:
         login(token=HF_TOKEN)
     else:
-        logger.error("HF_TOKEN is required. Set via --hf-token or HF_TOKEN environment variable.")
+        logger.error(
+            "HF_TOKEN is required. Set via --hf-token or HF_TOKEN environment variable."
+        )
         sys.exit(1)
-
+
     # Initialize vLLM with classification task
     logger.info(f"Loading model: {hub_model_id}")
     llm = LLM(model=hub_model_id, task="classify")
-
+
     # Get label mapping if available
     id2label = get_model_id2label(hub_model_id)
-
+
     # Load dataset
     logger.info(f"Loading dataset: {src_dataset_hub_id}")
     dataset = load_dataset(src_dataset_hub_id, split="train")
     total_examples = len(dataset)
     logger.info(f"Dataset loaded with {total_examples:,} examples")
-
+
     # Extract text column
     if inference_column not in dataset.column_names:
-        logger.error(f"Column '{inference_column}' not found. Available columns: {dataset.column_names}")
+        logger.error(
+            f"Column '{inference_column}' not found. Available columns: {dataset.column_names}"
+        )
         sys.exit(1)
-
+
     prompts = dataset[inference_column]
-
+
     # Process in batches
     logger.info(f"Starting classification with batch size {batch_size:,}")
     all_results = []
-
+
     for batch in tqdm(
         list(partition_all(batch_size, prompts)),
         desc="Processing batches",
-        unit="batch"
+        unit="batch",
     ):
         batch_results = llm.classify(batch)
         all_results.append(batch_results)
-
+
     # Flatten results
     outputs = list(concat(all_results))
-
+
     # Extract labels and probabilities
     logger.info("Extracting predictions...")
     labels_and_probs = [get_top_label(output, id2label) for output in outputs]
-
+
     # Add results to dataset
     dataset = dataset.add_column("label", [label for label, _ in labels_and_probs])
     dataset = dataset.add_column("prob", [prob for _, prob in labels_and_probs])
-
+
     # Push to hub
     logger.info(f"Pushing results to: {output_dataset_hub_id}")
     dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
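Aside (not part of the commit): the batching in main() relies on two toolz helpers, partition_all to chunk the prompt list and concat to flatten the per-batch outputs back into one list aligned with the dataset rows. A small sketch with stand-in data in place of llm.classify:

from toolz import concat, partition_all

prompts = [f"example {i}" for i in range(25)]        # stand-in for the dataset column
batches = list(partition_all(10, prompts))           # chunks of 10, 10 and 5 prompts
results = [[p.upper() for p in b] for b in batches]  # stand-in for llm.classify(batch)
flat = list(concat(results))                         # one flat list, aligned with prompts
assert len(flat) == len(prompts)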
@@ -216,41 +223,40 @@ Examples:

     # Using environment variable for token
     HF_TOKEN=hf_xxx uv run classify-dataset.py model/name input-dataset output-dataset
-    """
+    """,
     )
-
+
     parser.add_argument(
         "hub_model_id",
-        help="Hugging Face model ID for classification (e.g., bert-base-uncased)"
+        help="Hugging Face model ID for classification (e.g., bert-base-uncased)",
     )
     parser.add_argument(
         "src_dataset_hub_id",
-        help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)"
+        help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
     )
     parser.add_argument(
-        "output_dataset_hub_id",
-        help="Output dataset name on Hugging Face Hub"
+        "output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
     )
     parser.add_argument(
         "--inference-column",
         type=str,
         default="text",
-        help="Column containing text to classify (default: text)"
+        help="Column containing text to classify (default: text)",
     )
     parser.add_argument(
         "--batch-size",
         type=int,
         default=10_000,
-        help="Batch size for inference (default: 10,000)"
+        help="Batch size for inference (default: 10,000)",
     )
     parser.add_argument(
         "--hf-token",
         type=str,
-        help="Hugging Face token (can also use HF_TOKEN env var)"
+        help="Hugging Face token (can also use HF_TOKEN env var)",
     )
-
+
     args = parser.parse_args()
-
+
     main(
         hub_model_id=args.hub_model_id,
         src_dataset_hub_id=args.src_dataset_hub_id,
@@ -283,4 +289,4 @@ Example HF Jobs command:
 ' \\
     --project vllm-classify \\
     --name my-classification-job
-""")
+""")
generate-responses.py
@@ -42,9 +42,8 @@ import logging
 import os
 import sys
 from datetime import datetime
-from typing import
+from typing import Optional

-import torch
 from datasets import load_dataset
 from huggingface_hub import DatasetCard, get_token, login
 from torch import cuda
@@ -105,7 +104,7 @@ def create_dataset_card(
 - **Max Model Length Used**: {max_model_len_used:,} tokens

 Note: Prompts exceeding the maximum model length were skipped and have empty responses."""
-
+
     return f"""---
 viewer: false
 tags:
@@ -160,7 +159,7 @@ uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py
     --temperature {sampling_params.temperature} \\
     --top-p {sampling_params.top_p} \\
     --top-k {sampling_params.top_k} \\
-    --max-tokens {sampling_params.max_tokens}{f
+    --max-tokens {sampling_params.max_tokens}{f" \\\\\\n    --max-model-len {max_model_len_used}" if max_model_len_used else ""}
 ```
 """

@@ -287,7 +286,7 @@ def main(
     valid_prompts = []
     valid_indices = []
     skipped_info = []
-
+
     for i, example in enumerate(tqdm(dataset, desc="Processing messages")):
         messages = example[messages_column]
         # Apply chat template
@@ -295,7 +294,7 @@
             messages, tokenize=False, add_generation_prompt=True
         )
         all_prompts.append(prompt)
-
+
         # Count tokens if filtering is enabled
         if skip_long_prompts:
             tokens = tokenizer.encode(prompt)
@@ -307,20 +306,24 @@
         else:
             valid_prompts.append(prompt)
             valid_indices.append(i)
-
+
     # Log filtering results
     if skip_long_prompts and skipped_info:
-        logger.warning(f"Skipped {len(skipped_info)} prompts that exceed max_model_len ({effective_max_len} tokens)")
+        logger.warning(
+            f"Skipped {len(skipped_info)} prompts that exceed max_model_len ({effective_max_len} tokens)"
+        )
         logger.info("Skipped prompt details (first 10):")
         for idx, (prompt_idx, token_count) in enumerate(skipped_info[:10]):
-            logger.info(f" - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})")
+            logger.info(
+                f" - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})"
+            )
         if len(skipped_info) > 10:
             logger.info(f" ... and {len(skipped_info) - 10} more")
-
+
         skip_percentage = (len(skipped_info) / total_examples) * 100
         if skip_percentage > 10:
             logger.warning(f"WARNING: {skip_percentage:.1f}% of prompts were skipped!")
-
+
     if not valid_prompts:
         logger.error("No valid prompts to process after filtering!")
         sys.exit(1)
@@ -334,7 +337,7 @@
     # Extract generated text and create full response list
     logger.info("Extracting generated responses...")
    responses = [""] * total_examples  # Initialize with empty strings
-
+
     for idx, output in enumerate(outputs):
         original_idx = valid_indices[idx]
         response = output.outputs[0].text.strip()
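Aside (not part of the commit): the filtering reported by the log lines above keeps a prompt only when its token count fits within the effective maximum model length, recording the index and length of everything it skips. A minimal sketch with made-up token counts in place of tokenizer.encode:

effective_max_len = 8          # stand-in for the real max_model_len
token_counts = [5, 12, 7, 30]  # stand-in for len(tokenizer.encode(prompt)) per example

skipped_info = [(i, n) for i, n in enumerate(token_counts) if n > effective_max_len]
valid_indices = [i for i, n in enumerate(token_counts) if n <= effective_max_len]
print(skipped_info)   # [(1, 12), (3, 30)]
print(valid_indices)  # [0, 2]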