davanstrien HF Staff committed on
Commit d07aeb1 · verified · 1 Parent(s): 27378f9

Upload examples/uv/dedupe-dataset.py with huggingface_hub

Files changed (1)
  1. examples/uv/dedupe-dataset.py +259 -0
examples/uv/dedupe-dataset.py ADDED
@@ -0,0 +1,259 @@
# /// script
# requires-python = ">=3.9"
# dependencies = [
#     "semhash",
#     "datasets",
#     "huggingface-hub",
#     "hf-transfer",
#     "hf-xet",
# ]
# ///
"""Deduplicate a Hugging Face dataset using SemHash.

This script uses semantic deduplication to remove duplicate entries from a dataset
based on a specified text column, then pushes the results to a new dataset repository.
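
Example invocation (the dataset, column, and repository names are illustrative):
    uv run dedupe-dataset.py imdb text username/imdb-deduplicated --method deduplicate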
"""

import argparse
import os
import sys
from datetime import datetime
from typing import Optional

# Enable hf_transfer to speed up transfers. Set before importing `datasets` and
# `huggingface_hub`, which read this variable at import time.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from datasets import Dataset, load_dataset
from huggingface_hub import DatasetCard
from semhash import SemHash


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Deduplicate a Hugging Face dataset using semantic similarity"
    )
    parser.add_argument(
        "dataset_id",
        type=str,
        help="Source dataset ID (e.g., 'imdb', 'squad', 'username/dataset-name')",
    )
    parser.add_argument(
        "column",
        type=str,
        help="Column name to deduplicate on (e.g., 'text', 'question', 'context')",
    )
    parser.add_argument(
        "repo_id",
        type=str,
        help="Target repository ID for deduplicated dataset (e.g., 'username/my-deduplicated-dataset')",
    )
    parser.add_argument(
        "--split",
        type=str,
        default="train",
        help="Dataset split to process (default: train)",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=None,
        help="Similarity threshold for deduplication (0-1, default: auto)",
    )
    parser.add_argument(
        "--method",
        type=str,
        choices=["deduplicate", "filter_outliers", "find_representative"],
        default="deduplicate",
        help="Deduplication method to use (default: deduplicate)",
    )
    parser.add_argument(
        "--private",
        action="store_true",
        help="Make the output dataset private",
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        default=None,
        help="Maximum number of samples to process (for testing)",
    )

    return parser.parse_args()


def create_dataset_card(
    original_dataset_id: str,
    repo_id: str,
    column: str,
    method: str,
    duplicate_ratio: float,
    original_size: int,
    deduplicated_size: int,
    threshold: Optional[float] = None,
) -> str:
    """Create a dataset card with deduplication information."""
    card_content = f"""---
tags:
- deduplicated
- semhash
- semantic-deduplication
- hfjobs
---

# Deduplicated {original_dataset_id}

This dataset is a deduplicated version of [{original_dataset_id}](https://huggingface.co/datasets/{original_dataset_id})
using semantic deduplication with [SemHash](https://github.com/MinishLab/semhash).

## Deduplication Details

- **Method**: {method}
- **Column**: `{column}`
- **Original size**: {original_size:,} samples
- **Deduplicated size**: {deduplicated_size:,} samples
- **Duplicate ratio**: {duplicate_ratio:.2%}
- **Reduction**: {(1 - deduplicated_size / original_size):.2%}
"""

    if threshold is not None:
        card_content += f"- **Similarity threshold**: {threshold}\n"

    card_content += f"""
- **Date processed**: {datetime.now().strftime("%Y-%m-%d")}

## How to use

```python
from datasets import load_dataset

dataset = load_dataset("{repo_id}")
```

## Processing script

This dataset was created using the following script:

```bash
uv run dedupe-dataset.py {original_dataset_id} {column} <repo_id> --method {method}
```

## About semantic deduplication

Unlike exact deduplication, semantic deduplication identifies and removes samples that are
semantically similar even if they use different words. This helps create cleaner training
datasets and prevents data leakage between train/test splits.
"""

    return card_content


def main():
    """Main function to run deduplication."""
    args = parse_args()

    # Check for HF token
    token = os.environ.get("HF_TOKEN")
    if not token:
        print(
            "Warning: HF_TOKEN not found in environment. You may not be able to push to private repos."
        )

    # Load dataset
    print(f"Loading dataset '{args.dataset_id}' (split: {args.split})...")
    try:
        if args.max_samples:
            dataset = load_dataset(
                args.dataset_id, split=f"{args.split}[:{args.max_samples}]", token=token
            )
        else:
            dataset = load_dataset(args.dataset_id, split=args.split, token=token)
    except Exception as e:
        print(f"Error loading dataset: {e}")
        sys.exit(1)

    # Validate column exists
    if args.column not in dataset.column_names:
        print(f"Error: Column '{args.column}' not found in dataset.")
        print(f"Available columns: {', '.join(dataset.column_names)}")
        sys.exit(1)

    # Convert dataset to records for semhash
    print(f"Preparing dataset for deduplication on column '{args.column}'...")
    records = [dict(row) for row in dataset]
    original_size = len(records)
    print(f"Found {original_size:,} samples")

    # Initialize SemHash with the specific column
    print("Initializing SemHash with default model...")
    semhash = SemHash.from_records(records=records, columns=[args.column])

    # Apply selected method
    print(f"Applying {args.method} method...")
    if args.method == "deduplicate":
        if args.threshold is not None:
            result = semhash.self_deduplicate(threshold=args.threshold)
        else:
            result = semhash.self_deduplicate()
    elif args.method == "filter_outliers":
        result = semhash.self_filter_outliers()
    elif args.method == "find_representative":
        result = semhash.self_find_representative()

    # Get deduplicated records
    deduplicated_records = result.selected
    deduplicated_size = len(deduplicated_records)

    # Print statistics
    print("\nDeduplication complete!")
    print(f"Original size: {original_size:,}")
    print(f"Deduplicated size: {deduplicated_size:,}")
    print(
        f"Removed: {original_size - deduplicated_size:,} ({result.duplicate_ratio:.2%})"
    )

    # Create new dataset from deduplicated records
    print("\nCreating deduplicated dataset...")
    deduplicated_dataset = Dataset.from_list(deduplicated_records)

    # Push dataset to hub first (this creates the repo)
    print(f"\nPushing deduplicated dataset to '{args.repo_id}'...")
    try:
        deduplicated_dataset.push_to_hub(
            args.repo_id,
            private=args.private,
            token=token,
            commit_message=f"Add deduplicated version of {args.dataset_id}",
        )
        print("Dataset pushed successfully!")

        # Create and push dataset card
        print("Creating and pushing dataset card...")
        card_content = create_dataset_card(
            original_dataset_id=args.dataset_id,
            repo_id=args.repo_id,
            column=args.column,
            method=args.method,
            duplicate_ratio=result.duplicate_ratio,
            original_size=original_size,
            deduplicated_size=deduplicated_size,
            threshold=args.threshold,
        )

        card = DatasetCard(card_content)
        card.push_to_hub(
            repo_id=args.repo_id,
            repo_type="dataset",
            token=token,
            commit_message="Add dataset card",
        )

        print(
            f"\nSuccess! Dataset available at: https://huggingface.co/datasets/{args.repo_id}"
        )
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()