reab5555 committed on
Commit
9763663
·
verified ·
1 Parent(s): f68ad9e

Update processing.py

Browse files
Files changed (1) hide show
  1. processing.py +6 -1
processing.py CHANGED
@@ -107,7 +107,7 @@ def load_text(file_path: str) -> str:
107
  with open(file_path, 'r', encoding='utf-8') as file:
108
  return file.read().strip()
109
 
110
- def truncate_text(text: str, max_tokens: int = 10000) -> str:
111
  words = text.split()
112
  if len(words) > max_tokens:
113
  return ' '.join(words[:max_tokens])
@@ -144,6 +144,11 @@ Please provide a comprehensive analysis for each speaker, including:
144
  Respond with a JSON object containing an array of speaker analyses under the key 'speaker_analyses'. Each speaker analysis should include all four aspects mentioned above.
145
  Analysis:"""
146
 
 
 
 
 
 
147
  response = llm.invoke(prompt)
148
 
149
  print("Raw LLM Model Output:")
 
107
  with open(file_path, 'r', encoding='utf-8') as file:
108
  return file.read().strip()
109
 
110
+ def truncate_text(text: str, max_tokens: int = 16000) -> str:
111
  words = text.split()
112
  if len(words) > max_tokens:
113
  return ' '.join(words[:max_tokens])
 
144
  Respond with a JSON object containing an array of speaker analyses under the key 'speaker_analyses'. Each speaker analysis should include all four aspects mentioned above.
145
  Analysis:"""
146
 
147
+ truncated_input_tokents_count = count_tokens(truncated_input)
148
+ print('truncated_input_tokents_count:', truncated_input_tokents_count)
149
+ input_tokens_count = count_tokens(prompt)
150
+ print('input_tokens_count', input_tokens_count)
151
+
152
  response = llm.invoke(prompt)
153
 
154
  print("Raw LLM Model Output:")