Commit · fcdc986
Parent(s): ed4c020
work
app.py CHANGED
@@ -50,19 +50,28 @@ async def summarize_batch(request: BatchSummarizationRequest):
     chunk_map = [] # maps index of chunk to content_id
 
     for item in request.inputs:
+        token_count = len(tokenizer.encode(item.text, truncation=False))
         chunks = chunk_text(item.text)
-        logger.info(f"[CHUNKING] content_id={item.content_id}
+        logger.info(f"[CHUNKING] content_id={item.content_id} token_len={token_count} num_chunks={len(chunks)}")
         all_chunks.extend(chunks)
         chunk_map.extend([item.content_id] * len(chunks))
 
-    #
+    # Retokenize and only allow chunks that are safely below the max token limit
     safe_chunks = []
-
-
-
-
-
+    safe_chunk_map = []
+    for content_id, chunk in zip(chunk_map, all_chunks):
+        encoded = tokenizer(chunk, return_tensors="pt", truncation=True, max_length=MAX_MODEL_TOKENS)
+        token_count = encoded["input_ids"].shape[1]
+        if token_count > MAX_MODEL_TOKENS:
+            logger.warning(f"[SKIP] content_id={content_id} Chunk too long after truncation: {token_count} tokens")
+            continue
+        decoded = tokenizer.decode(encoded["input_ids"][0], skip_special_tokens=True)
         safe_chunks.append(decoded)
+        safe_chunk_map.append(content_id)
+
+    if not safe_chunks:
+        logger.error("No valid chunks after token filtering. Returning empty response.")
+        return {"summaries": []}
 
     summaries = summarizer(
         safe_chunks,
@@ -75,7 +84,7 @@ async def summarize_batch(request: BatchSummarizationRequest):
 
     # Aggregate summaries back per content_id
     summary_map = {}
-    for content_id, result in zip(chunk_map, summaries):
+    for content_id, result in zip(safe_chunk_map, summaries):
         summary_map.setdefault(content_id, []).append(result["summary_text"])
 
     response_items = [
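The core of the change is the retokenization pass: each chunk is re-encoded with truncation=True so it cannot exceed the model's context window, decoded back to text before being handed to the summarizer, and safe_chunk_map records which content_id each surviving chunk belongs to. Below is a minimal standalone sketch of that step, assuming a Hugging Face transformers tokenizer; the checkpoint name, the MAX_MODEL_TOKENS value, and the filter_safe_chunks wrapper are placeholders, not taken from the Space.

from transformers import AutoTokenizer

MAX_MODEL_TOKENS = 1024                                            # placeholder limit
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")  # placeholder checkpoint

def filter_safe_chunks(chunk_map, all_chunks):
    # Re-encode each chunk with truncation so it cannot exceed the limit,
    # then decode back to text, keeping the content_id alignment intact.
    safe_chunks, safe_chunk_map = [], []
    for content_id, chunk in zip(chunk_map, all_chunks):
        encoded = tokenizer(chunk, return_tensors="pt",
                            truncation=True, max_length=MAX_MODEL_TOKENS)
        token_count = encoded["input_ids"].shape[1]
        if token_count > MAX_MODEL_TOKENS:
            # Defensive guard: with truncation=True this should never fire.
            continue
        decoded = tokenizer.decode(encoded["input_ids"][0],
                                   skip_special_tokens=True)
        safe_chunks.append(decoded)
        safe_chunk_map.append(content_id)
    return safe_chunks, safe_chunk_map

Because truncation=True already caps the encoding at max_length, the token_count > MAX_MODEL_TOKENS branch acts as a defensive guard rather than an expected path: over-long chunks are shortened, not skipped, under normal operation.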
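The second hunk is what keeps the endpoint correct end to end: summaries is index-aligned with safe_chunks, so zipping it against the original chunk_map would mis-attribute results as soon as a single chunk was dropped. Zipping against safe_chunk_map preserves the pairing. A toy run with hypothetical data shows the grouping:

# Hypothetical pipeline output, index-aligned with safe_chunk_map.
summaries = [
    {"summary_text": "Chunk 1 of doc A, summarized."},
    {"summary_text": "Chunk 2 of doc A, summarized."},
    {"summary_text": "Only chunk of doc B, summarized."},
]
safe_chunk_map = ["doc-a", "doc-a", "doc-b"]

summary_map = {}
for content_id, result in zip(safe_chunk_map, summaries):
    summary_map.setdefault(content_id, []).append(result["summary_text"])

print(summary_map)
# {'doc-a': ['Chunk 1 of doc A, summarized.', 'Chunk 2 of doc A, summarized.'],
#  'doc-b': ['Only chunk of doc B, summarized.']}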