tatianija committed on
Commit
d57cf0d
·
verified ·
1 Parent(s): 6d992fe

Update app.py

Browse files

Removed thinking tags removal

Files changed (1) hide show
  1. app.py +1 -20
app.py CHANGED
@@ -165,14 +165,6 @@ class WebContentFetcher:
165
  results.append(result)
166
  time.sleep(1) # Be respectful to servers
167
  return results
168
-
169
- def remove_thinking_tags(text):
170
- import re
171
- # Remove <think>...</think> blocks
172
- cleaned = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
173
- # Remove thinking markers
174
- cleaned = re.sub(r'<thinking>.*?</thinking>', '', cleaned, flags=re.DOTALL)
175
- return cleaned.strip()
176
 
177
  # --- File Download Utility ---
178
  def download_attachment(url: str, temp_dir: str) -> Optional[str]:
@@ -374,7 +366,7 @@ class IntelligentAgent:
374
  max_tokens=max_tokens,
375
  temperature=temperature
376
  )
377
- return remove_thinking_tags(response.choices[0].message.content.strip())
378
  except Exception as chat_error:
379
  if self.debug:
380
  print(f"Chat completion failed: {chat_error}, trying text generation...")
@@ -386,7 +378,6 @@ class IntelligentAgent:
386
  temperature=temperature,
387
  do_sample=temperature > 0
388
  )
389
- response = remove_thinking_tags(response.strip)
390
  return response.strip()
391
 
392
  except Exception as e:
@@ -655,7 +646,6 @@ Answer:"""
655
 
656
  try:
657
  response = self._chat_completion(answer_prompt, max_tokens=500, temperature=0.3)
658
- response = remove_thinking_tags(response)
659
  return response
660
 
661
  except Exception as e:
@@ -729,7 +719,6 @@ Answer:"""
729
 
730
  try:
731
  response = self._chat_completion(answer_prompt, max_tokens=600, temperature=0.3)
732
- response = remove_thinking_tags(response)
733
  return response
734
 
735
  except Exception as e:
@@ -780,15 +769,12 @@ Answer:"""
780
  if self.debug:
781
  print("Using search-based approach")
782
  answer = self._answer_with_search(question_text, attachment_context)
783
- answer = remove_thinking_tags(answer)
784
  else:
785
  if self.debug:
786
  print("Using LLM-only approach")
787
  answer = self._answer_with_llm(question_text, attachment_context)
788
  print("here")
789
  print(answer)
790
- answer = remove_thinking_tags(answer)
791
- print(answer)
792
  # Cleanup temporary files
793
  if image_files or audio_files or code_files:
794
  try:
@@ -806,7 +792,6 @@ Answer:"""
806
 
807
  if self.debug:
808
  print(f"Agent returning answer: {answer[:100]}...")
809
- answer = remove_thinking_tags(answer)
810
  return answer
811
 
812
  def __call__(self, question: str, image_files: List[str] = None, audio_files: List[str] = None) -> str:
@@ -834,18 +819,15 @@ Answer:"""
834
  if self.debug:
835
  print("Using search-based approach")
836
  answer = self._answer_with_search(question, attachment_context)
837
- answer = remove_thinking_tags(answer)
838
  else:
839
  if self.debug:
840
  print("Using LLM-only approach")
841
  answer = self._answer_with_llm(question, attachment_context)
842
- answer = remove_thinking_tags(answer)
843
  except Exception as e:
844
  answer = f"Sorry, I encountered an error: {e}"
845
 
846
  if self.debug:
847
  print(f"Agent returning answer: {answer[:100]}...")
848
- answer = remove_thinking_tags(answer)
849
  return answer
850
 
851
  def fetch_questions() -> Tuple[str, Optional[pd.DataFrame]]:
@@ -943,7 +925,6 @@ def generate_answers_async(model_name: str = "meta-llama/Llama-3.1-8B-Instruct",
943
  try:
944
  # Use the new method that handles attachments
945
  answer = agent.process_question_with_attachments(question_data)
946
- answer = remove_thinking_tags(answer)
947
  cached_answers[task_id] = {
948
  "question": question_text,
949
  "answer": answer
 
165
  results.append(result)
166
  time.sleep(1) # Be respectful to servers
167
  return results
 
 
 
 
 
 
 
 
168
 
169
  # --- File Download Utility ---
170
  def download_attachment(url: str, temp_dir: str) -> Optional[str]:
 
366
  max_tokens=max_tokens,
367
  temperature=temperature
368
  )
369
+ return response.choices[0].message.content.strip()
370
  except Exception as chat_error:
371
  if self.debug:
372
  print(f"Chat completion failed: {chat_error}, trying text generation...")
 
378
  temperature=temperature,
379
  do_sample=temperature > 0
380
  )
 
381
  return response.strip()
382
 
383
  except Exception as e:
 
646
 
647
  try:
648
  response = self._chat_completion(answer_prompt, max_tokens=500, temperature=0.3)
 
649
  return response
650
 
651
  except Exception as e:
 
719
 
720
  try:
721
  response = self._chat_completion(answer_prompt, max_tokens=600, temperature=0.3)
 
722
  return response
723
 
724
  except Exception as e:
 
769
  if self.debug:
770
  print("Using search-based approach")
771
  answer = self._answer_with_search(question_text, attachment_context)
 
772
  else:
773
  if self.debug:
774
  print("Using LLM-only approach")
775
  answer = self._answer_with_llm(question_text, attachment_context)
776
  print("here")
777
  print(answer)
 
 
778
  # Cleanup temporary files
779
  if image_files or audio_files or code_files:
780
  try:
 
792
 
793
  if self.debug:
794
  print(f"Agent returning answer: {answer[:100]}...")
 
795
  return answer
796
 
797
  def __call__(self, question: str, image_files: List[str] = None, audio_files: List[str] = None) -> str:
 
819
  if self.debug:
820
  print("Using search-based approach")
821
  answer = self._answer_with_search(question, attachment_context)
 
822
  else:
823
  if self.debug:
824
  print("Using LLM-only approach")
825
  answer = self._answer_with_llm(question, attachment_context)
 
826
  except Exception as e:
827
  answer = f"Sorry, I encountered an error: {e}"
828
 
829
  if self.debug:
830
  print(f"Agent returning answer: {answer[:100]}...")
 
831
  return answer
832
 
833
  def fetch_questions() -> Tuple[str, Optional[pd.DataFrame]]:
 
925
  try:
926
  # Use the new method that handles attachments
927
  answer = agent.process_question_with_attachments(question_data)
 
928
  cached_answers[task_id] = {
929
  "question": question_text,
930
  "answer": answer