# tf_data_pipeline.py
import os
import numpy as np
import faiss
import tensorflow as tf
import h5py
import math
from tqdm import tqdm
import json
from pathlib import Path
from typing import Union, Optional, Dict, List, Tuple, Generator
from transformers import AutoTokenizer
import gc
import random
from logger_config import config_logger
logger = config_logger(__name__)
class TFDataPipeline:
def __init__(
self,
config,
tokenizer,
encoder,
index_file_path: str,
response_pool: List[str],
max_length: int,
query_embeddings_cache: dict,
neg_samples: int = 5,
index_type: str = 'IndexFlatIP',
nlist: int = 100,
max_retries: int = 3
):
self.config = config
self.tokenizer = tokenizer
self.encoder = encoder
self.index_file_path = index_file_path
self.response_pool = response_pool
self.max_length = max_length
self.neg_samples = neg_samples
self.query_embeddings_cache = query_embeddings_cache # In-memory cache for embeddings
self.index_type = index_type
self.nlist = nlist
self.embedding_batch_size = 16 if len(response_pool) < 100 else 64
self.search_batch_size = 16 if len(response_pool) < 100 else 64
self.max_batch_size = 16 if len(response_pool) < 100 else 64
self.max_retries = max_retries
# Build a quick text->domain map for O(1) domain lookups
self._text_domain_map = {}
self.build_text_to_domain_map()
if os.path.exists(index_file_path):
logger.info(f"Loading existing FAISS index from {index_file_path}...")
self.index = faiss.read_index(index_file_path)
self.validate_faiss_index()
logger.info("FAISS index loaded and validated successfully.")
else:
            # Initialize a new FAISS index. Note: index_type/nlist are accepted by
            # __init__, but a flat inner-product index is what gets built here.
            dimension = self.encoder.config.embedding_dim
            self.index = faiss.IndexFlatIP(dimension)
            logger.info(f"Initialized FAISS IndexFlatIP with dimension {dimension}.")
        # IndexFlatIP requires no training; this branch only matters for trainable
        # index types, and only when cached query embeddings exist to train on.
        if not self.index.is_trained and self.query_embeddings_cache:
            cached_embeddings = np.array(list(self.query_embeddings_cache.values())).astype(np.float32)
            self.index.train(cached_embeddings)
            self.index.add(cached_embeddings)
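    # Hedged usage sketch (assumes `chatbot_config`, `tokenizer`, and `encoder`
    # objects exist with the attributes this class expects, e.g.
    # encoder.config.embedding_dim):
    #
    #     pipeline = TFDataPipeline(
    #         config=chatbot_config,
    #         tokenizer=tokenizer,
    #         encoder=encoder,
    #         index_file_path="models/faiss_index.bin",   # illustrative path
    #         response_pool=[],                           # filled later from dialogues
    #         max_length=512,
    #         query_embeddings_cache={},
    #         neg_samples=5,
    #     )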
def save_embeddings_cache_hdf5(self, cache_file_path: str):
"""Save the embeddings cache to an HDF5 file."""
with h5py.File(cache_file_path, 'w') as hf:
for query, emb in self.query_embeddings_cache.items():
hf.create_dataset(query, data=emb)
logger.info(f"Embeddings cache saved to {cache_file_path}.")
def load_embeddings_cache_hdf5(self, cache_file_path: str):
"""Load the embeddings cache from an HDF5 file."""
with h5py.File(cache_file_path, 'r') as hf:
for query in hf.keys():
self.query_embeddings_cache[query] = hf[query][:]
logger.info(f"Embeddings cache loaded from {cache_file_path}.")
def save_faiss_index(self, index_file_path: str):
faiss.write_index(self.index, index_file_path)
logger.info(f"FAISS index saved to {index_file_path}")
def load_faiss_index(self, index_file_path: str):
"""Load the FAISS index from the specified file path."""
if os.path.exists(index_file_path):
self.index = faiss.read_index(index_file_path)
logger.info(f"FAISS index loaded from {index_file_path}.")
else:
logger.error(f"FAISS index file not found at {index_file_path}.")
raise FileNotFoundError(f"FAISS index file not found at {index_file_path}.")
def validate_faiss_index(self):
"""Validates that the FAISS index has the correct dimensionality."""
expected_dim = self.encoder.config.embedding_dim
if self.index.d != expected_dim:
logger.error(f"FAISS index dimension {self.index.d} does not match encoder embedding dimension {expected_dim}.")
raise ValueError("FAISS index dimensionality mismatch.")
logger.info("FAISS index dimension validated successfully.")
def save_tokenizer(self, tokenizer_dir: str):
self.tokenizer.save_pretrained(tokenizer_dir)
logger.info(f"Tokenizer saved to {tokenizer_dir}")
def load_tokenizer(self, tokenizer_dir: str):
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)
logger.info(f"Tokenizer loaded from {tokenizer_dir}")
@staticmethod
def load_json_training_data(data_path: Union[str, Path], debug_samples: Optional[int] = None) -> List[dict]:
"""
Load training data from a JSON file.
Args:
data_path (Union[str, Path]): Path to the JSON file containing dialogues.
debug_samples (Optional[int]): Number of samples to load for debugging.
Returns:
List[dict]: List of dialogue dictionaries.
"""
logger.info(f"Loading training data from {data_path}...")
data_path = Path(data_path)
if not data_path.exists():
logger.error(f"Data file {data_path} does not exist.")
return []
with open(data_path, 'r', encoding='utf-8') as f:
dialogues = json.load(f)
if debug_samples is not None:
dialogues = dialogues[:debug_samples]
logger.info(f"Debug mode: Limited to {debug_samples} dialogues")
logger.info(f"Loaded {len(dialogues)} dialogues.")
return dialogues
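    # The loader expects a JSON list of dialogue dicts shaped roughly like this
    # (domain at the top level, alternating user/assistant turns):
    #
    #     [
    #       {
    #         "domain": "restaurant",
    #         "turns": [
    #           {"speaker": "user", "text": "Any good pizza nearby?"},
    #           {"speaker": "assistant", "text": "Sure - try Luigi's on 5th."}
    #         ]
    #       }
    #     ]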
def collect_responses_with_domain(self, dialogues: List[dict]) -> List[Dict[str, str]]:
"""
Extract unique assistant responses from dialogues, along with the domain.
Returns a list of dicts: [{'domain': str, 'text': str}, ...]
"""
response_set = set() # We'll store (domain, text) tuples to keep them unique
results = []
for dialogue in tqdm(dialogues, desc="Processing Dialogues", unit="dialogue"):
            # Each dialogue stores its domain at the top level; default to 'other'.
domain = dialogue.get('domain', 'other')
turns = dialogue.get('turns', [])
for turn in turns:
speaker = turn.get('speaker')
text = turn.get('text', '').strip()
if speaker == 'assistant' and text:
if len(text) <= self.max_length:
# Use a tuple as a "set" key to ensure uniqueness
key = (domain, text)
if key not in response_set:
response_set.add(key)
results.append({
"domain": domain,
"text": text
})
logger.info(f"Collected {len(results)} unique assistant responses from dialogues.")
return results
# def collect_responses(self, dialogues: List[dict]) -> List[str]:
# """Extract unique assistant responses from dialogues."""
# response_set = set()
# for dialogue in tqdm(dialogues, desc="Processing Dialogues", unit="dialogue"):
# turns = dialogue.get('turns', [])
# for turn in turns:
# speaker = turn.get('speaker')
# text = turn.get('text', '').strip()
# if speaker == 'assistant' and text:
# # Ensure we don't exclude valid shorter responses
# if len(text) <= self.max_length:
# response_set.add(text)
# logger.info(f"Collected {len(response_set)} unique assistant responses from dialogues.")
# return list(response_set)
def _extract_pairs_from_dialogue(self, dialogue: dict) -> List[Tuple[str, str]]:
"""Extract query-response pairs from a dialogue."""
pairs = []
turns = dialogue.get('turns', [])
for i in range(len(turns) - 1):
current_turn = turns[i]
next_turn = turns[i+1]
if (current_turn.get('speaker') == 'user' and
next_turn.get('speaker') == 'assistant' and
'text' in current_turn and
'text' in next_turn):
query = current_turn['text'].strip()
positive = next_turn['text'].strip()
pairs.append((query, positive))
return pairs
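    # Example: a dialogue whose turns are
    #     [{"speaker": "user", "text": "Any good pizza nearby?"},
    #      {"speaker": "assistant", "text": "Sure - try Luigi's on 5th."}]
    # yields one pair: ("Any good pizza nearby?", "Sure - try Luigi's on 5th.").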
def compute_and_index_response_embeddings(self):
"""
Computes embeddings for the response pool and adds them to the FAISS index.
self.response_pool is now List[Dict[str, str]] with keys "domain" and "text".
"""
logger.info("Computing embeddings for the response pool...")
# Extract just the assistant text
texts = [resp["text"] for resp in self.response_pool]
logger.debug(f"Total texts to embed: {len(texts)}")
batch_size = getattr(self, 'embedding_batch_size', 64)
embeddings = []
with tqdm(total=len(texts), desc="Computing Embeddings", unit="response") as pbar:
for i in range(0, len(texts), batch_size):
batch_texts = texts[i:i+batch_size]
encodings = self.tokenizer(
batch_texts,
padding=True,
truncation=True,
max_length=self.max_length,
return_tensors='tf'
)
batch_embeds = self.encoder(encodings['input_ids'], training=False).numpy()
embeddings.append(batch_embeds)
pbar.update(len(batch_texts))
# Combine embeddings and add to FAISS
all_embeddings = np.vstack(embeddings).astype(np.float32)
logger.info(f"Adding {len(all_embeddings)} response embeddings to FAISS index...")
self.index.add(all_embeddings)
# For debugging or repeated usage, you might store them:
self.response_embeddings = all_embeddings
logger.info(f"FAISS index now has {self.index.ntotal} vectors.")
def _find_hard_negatives_batch(self, queries: List[str], positives: List[str]) -> List[List[str]]:
"""
Find hard negatives for a batch of queries using FAISS search.
Falls back to random negatives if we run out of tries or can't find enough.
Uses domain-based fallback if possible.
"""
retry_count = 0
total_responses = len(self.response_pool)
k = self.neg_samples # Number of negatives to retrieve from FAISS
batch_size = 128
while retry_count < self.max_retries:
try:
# 1) Build query embeddings from the cache
query_embeddings = []
for i in range(0, len(queries), batch_size):
sub_queries = queries[i : i + batch_size]
sub_embeds = [self.query_embeddings_cache[q] for q in sub_queries]
sub_embeds = np.vstack(sub_embeds).astype(np.float32)
faiss.normalize_L2(sub_embeds) # If not already normalized
query_embeddings.append(sub_embeds)
query_embeddings = np.vstack(query_embeddings)
query_embeddings = np.ascontiguousarray(query_embeddings)
# 2) Perform FAISS search
distances, indices = self.index.search(query_embeddings, k)
all_negatives = []
# For each query, find domain from the corresponding positive if possible
for query_indices, query_text, pos_text in zip(indices, queries, positives):
negative_list = []
seen = {pos_text.strip()}
# Attempt to detect the domain of the positive text
domain_of_positive = self._detect_domain_for_text(pos_text)
# Collect hard negatives from FAISS
for idx in query_indices:
if 0 <= idx < total_responses:
candidate_dict = self.response_pool[idx] # e.g. {domain, text}
candidate_text = candidate_dict["text"].strip()
if candidate_text and candidate_text not in seen:
seen.add(candidate_text)
negative_list.append(candidate_text)
if len(negative_list) >= self.neg_samples:
break
# If not enough negatives, fallback to random domain-based
if len(negative_list) < self.neg_samples:
needed = self.neg_samples - len(negative_list)
                        # Prefer random negatives drawn from the positive's domain.
random_negatives = self._get_random_negatives(needed, seen, domain=domain_of_positive)
negative_list.extend(random_negatives)
all_negatives.append(negative_list)
return all_negatives
except KeyError as ke:
retry_count += 1
logger.warning(f"Hard negative search attempt {retry_count} failed due to missing embeddings: {ke}")
if retry_count == self.max_retries:
logger.error("Max retries reached for hard negative search due to missing embeddings.")
return self._fallback_negatives(queries, positives, reason="key_error")
gc.collect()
if tf.config.list_physical_devices('GPU'):
tf.keras.backend.clear_session()
except Exception as e:
retry_count += 1
logger.warning(f"Hard negative search attempt {retry_count} failed: {e}")
if retry_count == self.max_retries:
logger.error("Max retries reached for hard negative search.")
return self._fallback_negatives(queries, positives, reason="generic_error")
gc.collect()
if tf.config.list_physical_devices('GPU'):
tf.keras.backend.clear_session()
def _detect_domain_for_text(self, text: str) -> Optional[str]:
"""
O(1) domain detection by looking up text in our dictionary.
Returns the domain if found, else None.
"""
stripped_text = text.strip()
return self._text_domain_map.get(stripped_text, None)
def _get_random_negatives(self, needed: int, seen: set, domain: Optional[str] = None) -> List[str]:
"""
Return a list of 'needed' random negative texts from the same domain if possible,
otherwise fallback to all-domain.
"""
# 1) Filter response_pool for domain if provided
if domain:
domain_texts = [r["text"] for r in self.response_pool if r["domain"] == domain]
            # Fall back to the full pool if the in-domain set is too small
            # (heuristic: require at least 2x the needed count).
            if len(domain_texts) < needed * 2:
domain_texts = [r["text"] for r in self.response_pool]
else:
domain_texts = [r["text"] for r in self.response_pool]
negatives = []
tries = 0
max_tries = needed * 10
while len(negatives) < needed and tries < max_tries:
tries += 1
candidate = random.choice(domain_texts).strip()
if candidate and candidate not in seen:
negatives.append(candidate)
seen.add(candidate)
# If still not enough, we do the best we can
if len(negatives) < needed:
logger.warning(f"Could not find enough domain-based random negatives; needed {needed}, got {len(negatives)}.")
return negatives
def _fallback_negatives(self, queries: List[str], positives: List[str], reason: str) -> List[List[str]]:
"""
Called if FAISS fails or embeddings are missing.
We use entirely random negatives for each query, ignoring FAISS,
but still attempt domain-based selection if possible.
"""
logger.error(f"Falling back to random negatives due to: {reason}")
all_negatives = []
for pos_text in positives:
# Build a 'seen' set with the positive
seen = {pos_text.strip()}
# Attempt to detect the domain of the positive text
domain_of_positive = self._detect_domain_for_text(pos_text)
# Use domain-based random negatives if available
negs = self._get_random_negatives(self.neg_samples, seen, domain=domain_of_positive)
all_negatives.append(negs)
return all_negatives
def build_text_to_domain_map(self):
"""
Build an O(1) lookup dict: text -> domain,
so we don't have to scan the entire self.response_pool each time.
"""
self._text_domain_map = {}
for item in self.response_pool:
# e.g., item = {"domain": "restaurant", "text": "some text..."}
stripped_text = item["text"].strip()
domain = item["domain"]
            # Duplicate text under the same domain is harmless. The same text under
            # a different domain is a collision: log it and keep the first domain seen.
            if stripped_text in self._text_domain_map:
                existing_domain = self._text_domain_map[stripped_text]
                if existing_domain != domain:
                    logger.warning(
                        f"Collision detected: text '{stripped_text}' found with domains "
                        f"'{existing_domain}' and '{domain}'. Keeping the first."
                    )
                continue
            self._text_domain_map[stripped_text] = domain
logger.info(f"Built text->domain map with {len(self._text_domain_map)} unique text entries.")
def encode_query(
self,
query: str,
context: Optional[List[Tuple[str, str]]] = None
) -> np.ndarray:
"""
Encode a user query (and optional conversation context) into an embedding vector.
Args:
query: The user query.
context: Optional conversation history as a list of (user_text, assistant_text).
Returns:
np.ndarray of shape [embedding_dim], typically L2-normalized already.
"""
        # '<USER>' / '<ASSISTANT>' are assumed to be registered as additional
        # special tokens on the tokenizer; use the literals directly.
        user_token = "<USER>"
        assistant_token = "<ASSISTANT>"
        # 1) Prepare context (if any) by concatenating user/assistant pairs
        if context:
            # Take the last N turns
            relevant_history = context[-self.config.max_context_turns:]
            context_str_parts = [
                f"{user_token} {u_text} {assistant_token} {a_text}"
                for (u_text, a_text) in relevant_history
            ]
            context_str = " ".join(context_str_parts)
            # Append the user's new query
            full_query = f"{context_str} {user_token} {query}"
        else:
            # Just a single user turn
            full_query = f"{user_token} {query}"
# 2) Tokenize
encodings = self.tokenizer(
[full_query],
padding='max_length',
truncation=True,
max_length=self.max_length,
return_tensors='np' # to keep it compatible with FAISS
)
input_ids = encodings['input_ids']
# 3) Check for out-of-vocab IDs
max_id = np.max(input_ids)
vocab_size = len(self.tokenizer)
if max_id >= vocab_size:
logger.error(f"Token ID {max_id} exceeds tokenizer vocab size {vocab_size}.")
raise ValueError("Token ID exceeds vocabulary size.")
# 4) Get embeddings from the model
embeddings = self.encoder(input_ids, training=False).numpy()
        # The encoder is expected to L2-normalize its final embeddings already.
# 5) Return the single embedding as 1D array
return embeddings[0]
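    # Hedged retrieval sketch at inference time (assumes the FAISS index and
    # response_pool are already populated):
    #
    #     query_emb = pipeline.encode_query("Any good pizza nearby?")
    #     distances, indices = pipeline.index.search(
    #         np.ascontiguousarray(query_emb[np.newaxis, :].astype(np.float32)), 5
    #     )
    #     top_texts = [pipeline.response_pool[i]["text"] for i in indices[0]]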
def encode_responses(
self,
responses: List[str],
context: Optional[List[Tuple[str, str]]] = None
) -> np.ndarray:
"""
Encode multiple response texts into embedding vectors.
Args:
responses: List of raw assistant responses.
context: Optional conversation context (last N turns).
Returns:
np.ndarray of shape [num_responses, embedding_dim].
"""
        user_token = "<USER>"
        assistant_token = "<ASSISTANT>"
        # 1) Optionally fold conversation context into the response encoding.
        #    Plain retrieval usually skips this, but it is supported here.
        if context:
            relevant_history = context[-self.config.max_context_turns:]
            context_str = " ".join(
                f"{user_token} {u_text} {assistant_token} {a_text}"
                for (u_text, a_text) in relevant_history
            )
            # Treat each response as an assistant turn appended to the context
            prepared = [f"{context_str} {assistant_token} {resp}" for resp in responses]
        else:
            # By default, just mark each response as from the assistant
            prepared = [f"{assistant_token} {r}" for r in responses]
# 2) Tokenize
encodings = self.tokenizer(
prepared,
padding='max_length',
truncation=True,
max_length=self.max_length,
return_tensors='np'
)
input_ids = encodings['input_ids']
# 3) Check for out-of-vocab
max_id = np.max(input_ids)
vocab_size = len(self.tokenizer)
if max_id >= vocab_size:
logger.error(f"Token ID {max_id} exceeds tokenizer vocab size {vocab_size}.")
raise ValueError("Token ID exceeds vocabulary size.")
# 4) Model forward
embeddings = self.encoder(input_ids, training=False).numpy()
        # Already L2-normalized if the encoder's final layer normalizes its output.
return embeddings.astype('float32')
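    # Example: encode_responses(["Sure - try Luigi's on 5th."]) returns an array
    # of shape (1, embedding_dim), ready to be added to the FAISS index.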
def prepare_and_save_data(self, dialogues: List[dict], tf_record_path: str, batch_size: int = 32):
"""
Processes dialogues in batches and saves to a TFRecord file using optimized batch tokenization and encoding.
Args:
dialogues (List[dict]): List of dialogue dictionaries.
tf_record_path (str): Path to save the TFRecord file.
batch_size (int): Number of dialogues to process per batch.
"""
logger.info(f"Preparing and saving data to {tf_record_path}...")
num_dialogues = len(dialogues)
num_batches = math.ceil(num_dialogues / batch_size)
with tf.io.TFRecordWriter(tf_record_path) as writer:
# Initialize progress bar
with tqdm(total=num_batches, desc="Preparing Data Batches", unit="batch") as pbar:
for i in range(num_batches):
start_idx = i * batch_size
end_idx = min(start_idx + batch_size, num_dialogues)
batch_dialogues = dialogues[start_idx:end_idx]
# Extract all query-positive pairs in the batch
queries = []
positives = []
for dialogue in batch_dialogues:
pairs = self._extract_pairs_from_dialogue(dialogue)
for query, positive in pairs:
if len(query) <= self.max_length and len(positive) <= self.max_length:
queries.append(query)
positives.append(positive)
if not queries:
pbar.update(1)
continue # Skip if no valid queries
# Compute and cache query embeddings
try:
self._compute_embeddings(queries)
except Exception as e:
logger.error(f"Error computing embeddings: {e}")
pbar.update(1)
continue # Skip to the next batch
# Find hard negatives for the batch
try:
hard_negatives = self._find_hard_negatives_batch(queries, positives)
except Exception as e:
logger.error(f"Error finding hard negatives: {e}")
pbar.update(1)
continue # Skip to the next batch
# Tokenize and encode all queries, positives, and negatives in the batch
try:
encoded_queries = self.tokenizer.batch_encode_plus(
queries,
max_length=self.config.max_context_token_limit,
truncation=True,
padding='max_length',
return_tensors='tf'
)
encoded_positives = self.tokenizer.batch_encode_plus(
positives,
max_length=self.config.max_context_token_limit,
truncation=True,
padding='max_length',
return_tensors='tf'
)
except Exception as e:
logger.error(f"Error during tokenization: {e}")
pbar.update(1)
continue # Skip to the next batch
# Flatten hard_negatives while maintaining alignment
# Assuming hard_negatives is a list of lists, where each sublist corresponds to a query
try:
flattened_negatives = [neg for sublist in hard_negatives for neg in sublist]
encoded_negatives = self.tokenizer.batch_encode_plus(
flattened_negatives,
max_length=self.config.max_context_token_limit,
truncation=True,
padding='max_length',
return_tensors='tf'
)
                        # Reshape negatives to [num_queries, neg_samples, max_context_token_limit].
                        # Assumes _find_hard_negatives_batch returned exactly neg_samples
                        # negatives per query (the random-negative fallback tops up any shortfall).
                        num_negatives = self.neg_samples
                        reshaped_negatives = encoded_negatives['input_ids'].numpy().reshape(
                            -1, num_negatives, self.config.max_context_token_limit
                        )
except Exception as e:
logger.error(f"Error during negatives tokenization: {e}")
pbar.update(1)
continue # Skip to the next batch
# Serialize each example and write to TFRecord
for j in range(len(queries)):
try:
q_id = encoded_queries['input_ids'][j].numpy()
p_id = encoded_positives['input_ids'][j].numpy()
n_id = reshaped_negatives[j]
feature = {
'query_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=q_id)),
'positive_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=p_id)),
'negative_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=n_id.flatten())),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
except Exception as e:
logger.error(f"Error serializing example {j} in batch {i}: {e}")
continue # Skip to the next example
# Update progress bar
pbar.update(1)
logger.info(f"Data preparation complete. TFRecord saved.")
    def _compute_embeddings(self, queries: List[str]) -> None:
        """Compute embeddings for any queries missing from the cache and store them."""
new_queries = [q for q in queries if q not in self.query_embeddings_cache]
if not new_queries:
return # All queries already cached
# Compute embeddings for new queries
new_embeddings = []
for i in range(0, len(new_queries), self.embedding_batch_size):
batch_queries = new_queries[i:i + self.embedding_batch_size]
encoded = self.tokenizer(
batch_queries,
padding=True,
truncation=True,
max_length=self.max_length,
return_tensors='tf'
)
batch_embeddings = self.encoder(encoded['input_ids'], training=False).numpy()
faiss.normalize_L2(batch_embeddings)
new_embeddings.extend(batch_embeddings)
# Update the cache
for query, emb in zip(new_queries, new_embeddings):
self.query_embeddings_cache[query] = emb
def data_generator(self, dialogues: List[dict]) -> Generator[Tuple[str, str, List[str]], None, None]:
"""
Generates training examples: (query, positive, hard_negatives).
Wrapped the outer loop with tqdm for progress tracking.
"""
total_dialogues = len(dialogues)
logger.debug(f"Total dialogues to process: {total_dialogues}")
# Initialize tqdm progress bar
with tqdm(total=total_dialogues, desc="Processing Dialogues", unit="dialogue") as pbar:
for dialogue in dialogues:
pairs = self._extract_pairs_from_dialogue(dialogue)
for query, positive in pairs:
# Ensure embeddings are computed, find hard negatives, etc.
self._compute_embeddings([query])
hard_negatives = self._find_hard_negatives_batch([query], [positive])[0]
yield (query, positive, hard_negatives)
pbar.update(1)
def get_tf_dataset(self, dialogues: List[dict], batch_size: int) -> tf.data.Dataset:
"""
Creates a tf.data.Dataset for streaming training that yields
(input_ids_query, input_ids_positive, input_ids_negatives).
"""
# 1) Start with a generator dataset
dataset = tf.data.Dataset.from_generator(
lambda: self.data_generator(dialogues),
output_signature=(
tf.TensorSpec(shape=(), dtype=tf.string), # Query (single string)
tf.TensorSpec(shape=(), dtype=tf.string), # Positive (single string)
tf.TensorSpec(shape=(self.neg_samples,), dtype=tf.string) # Hard Negatives (list of strings)
)
)
# 2) Batch the raw strings
dataset = dataset.batch(batch_size, drop_remainder=True)
# 3) Map them through a tokenize step using `tf.py_function`
dataset = dataset.map(
lambda q, p, n: self._tokenize_triple(q, p, n),
            num_parallel_calls=1  # intentionally serial; tf.data.AUTOTUNE is the parallel alternative
)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
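    # Hedged streaming-training sketch (assumes `dialogues` is already loaded):
    #
    #     train_ds = pipeline.get_tf_dataset(dialogues, batch_size=32)
    #     for q_ids, p_ids, n_ids in train_ds.take(1):
    #         print(q_ids.shape)  # (32, max_length)
    #         print(n_ids.shape)  # (32, neg_samples, max_length)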
def _tokenize_triple(
self,
q: tf.Tensor,
p: tf.Tensor,
n: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""
Wraps a Python function via tf.py_function to convert tf.Tensors of strings
-> Python lists of strings -> HF tokenizer -> Tensors of IDs.
q is shape [batch_size], p is shape [batch_size],
n is shape [batch_size, neg_samples] (i.e., each row is a list of negatives).
"""
# Use tf.py_function with limited parallelism
q_ids, p_ids, n_ids = tf.py_function(
func=self._tokenize_triple_py,
inp=[q, p, n, tf.constant(self.max_length), tf.constant(self.neg_samples)],
Tout=[tf.int32, tf.int32, tf.int32]
)
# Manually set shape information
q_ids.set_shape([None, self.max_length]) # [batch_size, max_length]
p_ids.set_shape([None, self.max_length]) # [batch_size, max_length]
n_ids.set_shape([None, self.neg_samples, self.max_length]) # [batch_size, neg_samples, max_length]
return q_ids, p_ids, n_ids
def _tokenize_triple_py(
self,
q: tf.Tensor,
p: tf.Tensor,
n: tf.Tensor,
max_len: tf.Tensor,
neg_samples: tf.Tensor
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Python function that:
- Decodes each tf.string Tensor to a Python list of strings
- Calls the HF tokenizer
- Reshapes negatives
- Returns np.array of int32s for (q_ids, p_ids, n_ids).
q: shape [batch_size], p: shape [batch_size]
n: shape [batch_size, neg_samples]
max_len: scalar int
neg_samples: scalar int
"""
max_len = int(max_len.numpy()) # Convert to Python int
neg_samples = int(neg_samples.numpy())
# 1) Convert Tensors -> Python lists of strings
q_list = [q_i.decode("utf-8") for q_i in q.numpy()] # shape [batch_size]
p_list = [p_i.decode("utf-8") for p_i in p.numpy()] # shape [batch_size]
# shape [batch_size, neg_samples], decode each row
n_list = []
for row in n.numpy():
# row is shape [neg_samples], each is a tf.string
decoded = [neg.decode("utf-8") for neg in row]
n_list.append(decoded)
# 2) Tokenize queries & positives
q_enc = self.tokenizer(
q_list,
padding="max_length",
truncation=True,
max_length=max_len,
return_tensors="np"
)
p_enc = self.tokenizer(
p_list,
padding="max_length",
truncation=True,
max_length=max_len,
return_tensors="np"
)
# 3) Tokenize negatives
# Flatten [batch_size, neg_samples] -> single list
flattened_negatives = [neg for row in n_list for neg in row]
if len(flattened_negatives) == 0:
# No negatives at all: return a zero array
n_ids = np.zeros((len(q_list), neg_samples, max_len), dtype=np.int32)
else:
n_enc = self.tokenizer(
flattened_negatives,
padding="max_length",
truncation=True,
max_length=max_len,
return_tensors="np"
)
# shape [batch_size * neg_samples, max_len]
n_input_ids = n_enc["input_ids"]
# We want to reshape to [batch_size, neg_samples, max_len]
# Handle cases where there might be fewer negatives
batch_size = len(q_list)
n_ids_list = []
for i in range(batch_size):
start_idx = i * neg_samples
end_idx = start_idx + neg_samples
row_negs = n_input_ids[start_idx:end_idx]
# If fewer negatives, pad with zeros
if row_negs.shape[0] < neg_samples:
deficit = neg_samples - row_negs.shape[0]
pad_arr = np.zeros((deficit, max_len), dtype=np.int32)
row_negs = np.concatenate([row_negs, pad_arr], axis=0)
n_ids_list.append(row_negs)
# stack them -> shape [batch_size, neg_samples, max_len]
n_ids = np.stack(n_ids_list, axis=0)
# 4) Return as np.int32 arrays
q_ids = q_enc["input_ids"].astype(np.int32) # shape [batch_size, max_len]
p_ids = p_enc["input_ids"].astype(np.int32) # shape [batch_size, max_len]
n_ids = n_ids.astype(np.int32) # shape [batch_size, neg_samples, max_len]
return q_ids, p_ids, n_ids