spacesedan committed
Commit fcdc986 · 1 Parent(s): ed4c020
Files changed (1)
  1. app.py +17 -8
app.py CHANGED
@@ -50,19 +50,28 @@ async def summarize_batch(request: BatchSummarizationRequest):
     chunk_map = []  # maps index of chunk to content_id
 
     for item in request.inputs:
+        token_count = len(tokenizer.encode(item.text, truncation=False))
         chunks = chunk_text(item.text)
-        logger.info(f"[CHUNKING] content_id={item.content_id} original_len={len(item.text)} num_chunks={len(chunks)}")
+        logger.info(f"[CHUNKING] content_id={item.content_id} token_len={token_count} num_chunks={len(chunks)}")
         all_chunks.extend(chunks)
         chunk_map.extend([item.content_id] * len(chunks))
 
-    # Hard-truncate chunks during encoding and decode safely
+    # Retokenize and only allow chunks that are safely below the max token limit
     safe_chunks = []
-    for chunk in all_chunks:
-        encoded = tokenizer.encode(chunk, truncation=True, max_length=MAX_MODEL_TOKENS)
-        if len(encoded) >= MAX_MODEL_TOKENS:
-            logger.warning(f"[TRUNCATING] Chunk encoded to {len(encoded)} tokens, trimming to {MAX_MODEL_TOKENS}.")
-        decoded = tokenizer.decode(encoded, skip_special_tokens=True)
+    safe_chunk_map = []
+    for content_id, chunk in zip(chunk_map, all_chunks):
+        encoded = tokenizer(chunk, return_tensors="pt", truncation=True, max_length=MAX_MODEL_TOKENS)
+        token_count = encoded["input_ids"].shape[1]
+        if token_count > MAX_MODEL_TOKENS:
+            logger.warning(f"[SKIP] content_id={content_id} Chunk too long after truncation: {token_count} tokens")
+            continue
+        decoded = tokenizer.decode(encoded["input_ids"][0], skip_special_tokens=True)
         safe_chunks.append(decoded)
+        safe_chunk_map.append(content_id)
+
+    if not safe_chunks:
+        logger.error("No valid chunks after token filtering. Returning empty response.")
+        return {"summaries": []}
 
     summaries = summarizer(
         safe_chunks,
@@ -75,7 +84,7 @@ async def summarize_batch(request: BatchSummarizationRequest):
 
     # Aggregate summaries back per content_id
     summary_map = {}
-    for content_id, result in zip(chunk_map, summaries):
+    for content_id, result in zip(safe_chunk_map, summaries):
         summary_map.setdefault(content_id, []).append(result["summary_text"])
 
     response_items = [
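
The substance of the change: instead of hard-truncating every chunk and always forwarding it, each chunk is re-tokenized and any that still exceed the model limit are skipped, while a parallel safe_chunk_map keeps content_ids aligned with the chunks that survive, so the final zip cannot attribute summaries to the wrong content_id. Below is a minimal standalone sketch of that pattern, using a stand-in summarizer so it runs without the app; the checkpoint name, the MAX_MODEL_TOKENS value, and the sample inputs are illustrative assumptions, not values from app.py (requires transformers and torch).

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
    MAX_MODEL_TOKENS = 1024  # assumed cap; app.py defines its own value

    # Parallel lists, as the chunking loop in the diff builds them.
    all_chunks = ["short chunk for post-1", "word " * 3000, "short chunk for post-2"]
    chunk_map = ["post-1", "post-1", "post-2"]

    safe_chunks, safe_chunk_map = [], []
    for content_id, chunk in zip(chunk_map, all_chunks):
        # truncation=True already caps input_ids at max_length, so the
        # explicit check below acts as a guard against oversized stragglers.
        encoded = tokenizer(chunk, return_tensors="pt",
                            truncation=True, max_length=MAX_MODEL_TOKENS)
        if encoded["input_ids"].shape[1] > MAX_MODEL_TOKENS:
            continue  # skip rather than feed the model an oversized input
        safe_chunks.append(tokenizer.decode(encoded["input_ids"][0],
                                            skip_special_tokens=True))
        safe_chunk_map.append(content_id)  # ids stay aligned with survivors

    # Stand-in for the summarization pipeline: one result per safe chunk.
    summaries = [{"summary_text": f"summary of {c[:20]}..."} for c in safe_chunks]

    # Zipping against safe_chunk_map (not chunk_map) is the point of the fix:
    # if any chunk were skipped, chunk_map and summaries would be misaligned.
    summary_map = {}
    for content_id, result in zip(safe_chunk_map, summaries):
        summary_map.setdefault(content_id, []).append(result["summary_text"])

    print(summary_map)  # post-1 maps to two summaries, post-2 to one

Decoding the truncated input_ids back to text also means whatever string the pipeline re-tokenizes is already at or under the cap, which is what lets the old in-place [TRUNCATING] warning path be dropped entirely.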