import gradio as gr
import numpy as np
from datasets import load_dataset
from difflib import ndiff
from model2vec import StaticModel
from reach import Reach

# Load the embedding model at startup.
model = StaticModel.from_pretrained("minishlab/M2V_base_output")

# Default datasets and parameters.
default_dataset1_name = "sst2"
default_dataset1_split = "train"
default_dataset2_name = "sst2"
default_dataset2_split = "validation"
default_text_column = "sentence"
default_threshold = 0.9

# Load the default datasets at startup.
ds_default1 = load_dataset(default_dataset1_name, split=default_dataset1_split)
ds_default2 = load_dataset(default_dataset2_name, split=default_dataset2_split)

def batch_iterable(iterable, batch_size):
    """Helper function to create batches from an indexable iterable."""
    for i in range(0, len(iterable), batch_size):
        yield iterable[i : i + batch_size]
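
# Illustrative usage (not executed): batches preserve order and the last one may be short,
# e.g. list(batch_iterable([1, 2, 3, 4, 5], batch_size=2)) -> [[1, 2], [3, 4], [5]]
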
def display_word_differences(x: str, y: str) -> str:
    """Return a word-level diff of two texts, keeping only added/removed words."""
    diff = ndiff(x.split(), y.split())
    return " ".join(word for word in diff if word.startswith(("+", "-")))
def perform_deduplication(
    deduplication_type,
    dataset1_name,
    dataset1_split,
    dataset1_text_column,
    dataset2_name="",
    dataset2_split="",
    dataset2_text_column="",
    threshold=default_threshold,
    progress=gr.Progress(track_tqdm=True),
):
    """Run deduplication and stream (status, result) updates back to the UI."""
    try:
        # Convert threshold to float (it arrives from the UI widget).
        threshold = float(threshold)

        if deduplication_type == "Single dataset":
            # Load Dataset 1, reusing the preloaded default when possible.
            status = "Loading Dataset 1..."
            yield status, ""
            if dataset1_name == default_dataset1_name and dataset1_split == default_dataset1_split:
                ds = ds_default1
            else:
                ds = load_dataset(dataset1_name, split=dataset1_split)

            # Extract texts
            status = "Extracting texts from Dataset 1..."
            yield status, ""
            texts = [example[dataset1_text_column] for example in ds]

            # Compute embeddings in batches.
            status = "Computing embeddings for Dataset 1..."
            yield status, ""
            embeddings = []
            batch_size = 64
            total_batches = (len(texts) + batch_size - 1) // batch_size
            for batch_texts in progress.tqdm(
                batch_iterable(texts, batch_size),
                desc="Computing embeddings for Dataset 1",
                total=total_batches,
            ):
                batch_embeddings = model.encode(batch_texts, show_progressbar=False)
                embeddings.append(batch_embeddings)
            embedding_matrix = np.concatenate(embeddings, axis=0)

            # Deduplicate
            status = "Deduplicating embeddings..."
            yield status, ""
            deduplicated_indices, duplicate_to_original_mapping = deduplicate(
                embedding_matrix, threshold, progress=progress
            )

            # Prepare the results
            num_duplicates = len(duplicate_to_original_mapping)
            num_total = len(texts)
            num_deduplicated = len(deduplicated_indices)

            result_text = f"**Total documents:** {num_total}\n"
            result_text += f"**Number of duplicates found:** {num_duplicates}\n"
            result_text += f"**Number of unique documents after deduplication:** {num_deduplicated}\n\n"

            # Show a few examples of the duplicates that were found.
            if num_duplicates > 0:
                result_text += "**Examples of duplicates found:**\n\n"
                num_examples = min(5, num_duplicates)
                for duplicate_idx, original_idx in list(duplicate_to_original_mapping.items())[:num_examples]:
                    original_text = texts[original_idx]
                    duplicate_text = texts[duplicate_idx]
                    differences = display_word_differences(original_text, duplicate_text)
                    result_text += f"**Original text:**\n{original_text}\n\n"
                    result_text += f"**Duplicate text:**\n{duplicate_text}\n\n"
                    result_text += f"**Differences:**\n{differences}\n"
                    result_text += "-" * 50 + "\n\n"
            else:
                result_text += "No duplicates found."

            # Final status
            status = "Deduplication completed."
            yield status, result_text

        elif deduplication_type == "Cross-dataset":
            # Cross-dataset deduplication is not implemented yet; a sketch of the
            # core helper it would need is given after deduplicate() below.
            yield "Cross-dataset deduplication is not implemented yet.", ""

    except Exception as e:
        yield f"An error occurred: {e}", ""
        raise
def deduplicate(
    embedding_matrix: np.ndarray,
    threshold: float,
    batch_size: int = 1024,
    progress=None,
) -> tuple[np.ndarray, dict[int, int]]:
    """
    Deduplicate embeddings, returning the indices of the kept rows and a mapping
    from each removed index to the original index it duplicates.
    """
    # Build the search index over all embeddings.
    progress(0, desc="Building search index...")
    reach = Reach(vectors=embedding_matrix, items=[str(i) for i in range(len(embedding_matrix))])

    deduplicated_indices = set(range(len(embedding_matrix)))
    duplicate_to_original_mapping = {}

    # Find all neighbors above the similarity threshold.
    progress(0, desc="Finding nearest neighbors...")
    results = reach.nearest_neighbor_threshold(
        embedding_matrix,
        threshold=threshold,
        batch_size=batch_size,
        show_progressbar=False,  # Disable the internal progress bar; Gradio tracks progress.
    )

    # Process duplicates with a progress bar: if row i is still marked as kept,
    # each of its near-neighbors is removed and mapped back to i.
    total_items = len(embedding_matrix)
    for i, similar_items in enumerate(
        progress.tqdm(results, desc="Processing duplicates", total=total_items)
    ):
        if i not in deduplicated_indices:
            continue
        similar_indices = [int(item[0]) for item in similar_items if int(item[0]) != i]
        for sim_idx in similar_indices:
            if sim_idx in deduplicated_indices:
                deduplicated_indices.remove(sim_idx)
                duplicate_to_original_mapping[sim_idx] = i

    return np.array(list(deduplicated_indices)), duplicate_to_original_mapping
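
# A minimal sketch (not part of the original app) of the helper the "Cross-dataset"
# branch above would need. It mirrors the Reach usage in deduplicate(); the name
# deduplicate_across is hypothetical.
def deduplicate_across(
    embedding_matrix_1: np.ndarray,
    embedding_matrix_2: np.ndarray,
    threshold: float,
    batch_size: int = 1024,
) -> np.ndarray:
    """Return indices of rows in embedding_matrix_2 with no near-duplicate in embedding_matrix_1."""
    # Index dataset 1, then query it with dataset 2's embeddings.
    reach = Reach(
        vectors=embedding_matrix_1,
        items=[str(i) for i in range(len(embedding_matrix_1))],
    )
    results = reach.nearest_neighbor_threshold(
        embedding_matrix_2,
        threshold=threshold,
        batch_size=batch_size,
        show_progressbar=False,
    )
    # Keep a row from dataset 2 only if dataset 1 has nothing within the threshold.
    non_duplicates = [i for i, similar_items in enumerate(results) if not list(similar_items)]
    return np.array(non_duplicates)
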
with gr.Blocks() as demo:
    gr.Markdown("# Semantic Deduplication")

    deduplication_type = gr.Radio(
        choices=["Single dataset", "Cross-dataset"],
        label="Deduplication Type",
        value="Single dataset",
    )

    with gr.Row():
        dataset1_name = gr.Textbox(value=default_dataset1_name, label="Dataset 1 Name")
        dataset1_split = gr.Textbox(value=default_dataset1_split, label="Dataset 1 Split")
        dataset1_text_column = gr.Textbox(value=default_text_column, label="Text Column Name")

    # Dataset 2 inputs are hidden unless "Cross-dataset" is selected.
    dataset2_inputs = gr.Column(visible=False)
    with dataset2_inputs:
        gr.Markdown("### Dataset 2")
        with gr.Row():
            dataset2_name = gr.Textbox(value=default_dataset2_name, label="Dataset 2 Name")
            dataset2_split = gr.Textbox(value=default_dataset2_split, label="Dataset 2 Split")
            dataset2_text_column = gr.Textbox(value=default_text_column, label="Text Column Name")

    threshold = gr.Slider(
        minimum=0.0,
        maximum=1.0,
        value=default_threshold,
        label="Similarity Threshold",
    )

    compute_button = gr.Button("Compute")
    status_output = gr.Markdown()
    result_output = gr.Markdown()

    def update_visibility(deduplication_type_value):
        """Show the Dataset 2 inputs only for cross-dataset deduplication."""
        return gr.update(visible=(deduplication_type_value == "Cross-dataset"))

    deduplication_type.change(
        update_visibility,
        inputs=deduplication_type,
        outputs=dataset2_inputs,
    )

    compute_button.click(
        fn=perform_deduplication,
        inputs=[
            deduplication_type,
            dataset1_name,
            dataset1_split,
            dataset1_text_column,
            dataset2_name,
            dataset2_split,
            dataset2_text_column,
            threshold,
        ],
        outputs=[status_output, result_output],
    )
demo.launch()