bibibi12345 committed on
Commit
df1784a
·
1 Parent(s): 0a33ddd

removed debugging logs

Browse files
Files changed (3) hide show
  1. .DS_Store +0 -0
  2. app/api_helpers.py +4 -4
  3. app/openai_handler.py +11 -11
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app/api_helpers.py CHANGED
@@ -465,10 +465,10 @@ async def openai_fake_stream_generator( # Reverted signature: removed thought_ta
465
  print(f"INFO: OpenAI Direct Fake-Streaming - Applying tag extraction with fixed marker: '{VERTEX_REASONING_TAG}'")
466
  # Unconditionally attempt extraction with the fixed tag
467
  reasoning_text, actual_content_text = extract_reasoning_by_tags(actual_content_text, VERTEX_REASONING_TAG)
468
- if reasoning_text:
469
- print(f"DEBUG: Tag extraction success (fixed tag). Reasoning len: {len(reasoning_text)}, Content len: {len(actual_content_text)}")
470
- else:
471
- print(f"DEBUG: No content found within fixed tag '{VERTEX_REASONING_TAG}'.")
472
  else:
473
  print(f"WARNING: OpenAI Direct Fake-Streaming - No initial content found in message.")
474
  actual_content_text = "" # Ensure empty string
 
465
  print(f"INFO: OpenAI Direct Fake-Streaming - Applying tag extraction with fixed marker: '{VERTEX_REASONING_TAG}'")
466
  # Unconditionally attempt extraction with the fixed tag
467
  reasoning_text, actual_content_text = extract_reasoning_by_tags(actual_content_text, VERTEX_REASONING_TAG)
468
+ # if reasoning_text:
469
+ # print(f"DEBUG: Tag extraction success (fixed tag). Reasoning len: {len(reasoning_text)}, Content len: {len(actual_content_text)}")
470
+ # else:
471
+ # print(f"DEBUG: No content found within fixed tag '{VERTEX_REASONING_TAG}'.")
472
  else:
473
  print(f"WARNING: OpenAI Direct Fake-Streaming - No initial content found in message.")
474
  actual_content_text = "" # Ensure empty string
app/openai_handler.py CHANGED
@@ -138,13 +138,13 @@ class OpenAIDirectHandler:
138
 
139
  content = delta.get('content', '')
140
  if content:
141
- print(f"DEBUG: Chunk {chunk_count} - Raw content: '{content}'")
142
  # Use the processor to extract reasoning
143
  processed_content, current_reasoning = reasoning_processor.process_chunk(content)
144
 
145
  # Debug logging for processing results
146
- if processed_content or current_reasoning:
147
- print(f"DEBUG: Chunk {chunk_count} - Processed content: '{processed_content}', Reasoning: '{current_reasoning[:50]}...' if len(current_reasoning) > 50 else '{current_reasoning}'")
148
 
149
  # Send chunks for both reasoning and content as they arrive
150
  chunks_to_send = []
@@ -183,16 +183,16 @@ class OpenAIDirectHandler:
183
  return
184
 
185
  # Debug logging for buffer state and chunk count
186
- print(f"DEBUG: Stream ended after {chunk_count} chunks. Buffer state - tag_buffer: '{reasoning_processor.tag_buffer}', "
187
- f"inside_tag: {reasoning_processor.inside_tag}, "
188
- f"reasoning_buffer: '{reasoning_processor.reasoning_buffer[:50]}...' if reasoning_processor.reasoning_buffer else ''")
189
 
190
  # Flush any remaining buffered content
191
  remaining_content, remaining_reasoning = reasoning_processor.flush_remaining()
192
 
193
  # Send any remaining reasoning first
194
  if remaining_reasoning:
195
- print(f"DEBUG: Flushing remaining reasoning: '{remaining_reasoning[:50]}...' if len(remaining_reasoning) > 50 else '{remaining_reasoning}'")
196
  reasoning_chunk = {
197
  "id": f"chatcmpl-{int(time.time())}",
198
  "object": "chat.completion.chunk",
@@ -204,7 +204,7 @@ class OpenAIDirectHandler:
204
 
205
  # Send any remaining content
206
  if remaining_content:
207
- print(f"DEBUG: Flushing remaining content: '{remaining_content}'")
208
  final_chunk = {
209
  "id": f"chatcmpl-{int(time.time())}",
210
  "object": "chat.completion.chunk",
@@ -273,9 +273,9 @@ class OpenAIDirectHandler:
273
  message_dict['content'] = actual_content
274
  if reasoning_text:
275
  message_dict['reasoning_content'] = reasoning_text
276
- print(f"DEBUG: Tag extraction success. Reasoning len: {len(reasoning_text)}, Content len: {len(actual_content)}")
277
- else:
278
- print(f"DEBUG: No content found within fixed tag '{VERTEX_REASONING_TAG}'.")
279
  else:
280
  print(f"WARNING: OpenAI Direct Non-Streaming - No initial content found in message.")
281
  message_dict['content'] = ""
 
138
 
139
  content = delta.get('content', '')
140
  if content:
141
+ # print(f"DEBUG: Chunk {chunk_count} - Raw content: '{content}'")
142
  # Use the processor to extract reasoning
143
  processed_content, current_reasoning = reasoning_processor.process_chunk(content)
144
 
145
  # Debug logging for processing results
146
+ # if processed_content or current_reasoning:
147
+ # print(f"DEBUG: Chunk {chunk_count} - Processed content: '{processed_content}', Reasoning: '{current_reasoning[:50]}...' if len(current_reasoning) > 50 else '{current_reasoning}'")
148
 
149
  # Send chunks for both reasoning and content as they arrive
150
  chunks_to_send = []
 
183
  return
184
 
185
  # Debug logging for buffer state and chunk count
186
+ # print(f"DEBUG: Stream ended after {chunk_count} chunks. Buffer state - tag_buffer: '{reasoning_processor.tag_buffer}', "
187
+ # f"inside_tag: {reasoning_processor.inside_tag}, "
188
+ # f"reasoning_buffer: '{reasoning_processor.reasoning_buffer[:50]}...' if reasoning_processor.reasoning_buffer else ''")
189
 
190
  # Flush any remaining buffered content
191
  remaining_content, remaining_reasoning = reasoning_processor.flush_remaining()
192
 
193
  # Send any remaining reasoning first
194
  if remaining_reasoning:
195
+ # print(f"DEBUG: Flushing remaining reasoning: '{remaining_reasoning[:50]}...' if len(remaining_reasoning) > 50 else '{remaining_reasoning}'")
196
  reasoning_chunk = {
197
  "id": f"chatcmpl-{int(time.time())}",
198
  "object": "chat.completion.chunk",
 
204
 
205
  # Send any remaining content
206
  if remaining_content:
207
+ # print(f"DEBUG: Flushing remaining content: '{remaining_content}'")
208
  final_chunk = {
209
  "id": f"chatcmpl-{int(time.time())}",
210
  "object": "chat.completion.chunk",
 
273
  message_dict['content'] = actual_content
274
  if reasoning_text:
275
  message_dict['reasoning_content'] = reasoning_text
276
+ # print(f"DEBUG: Tag extraction success. Reasoning len: {len(reasoning_text)}, Content len: {len(actual_content)}")
277
+ # else:
278
+ # print(f"DEBUG: No content found within fixed tag '{VERTEX_REASONING_TAG}'.")
279
  else:
280
  print(f"WARNING: OpenAI Direct Non-Streaming - No initial content found in message.")
281
  message_dict['content'] = ""