Vartex39 committed
Commit 44e2364 · 1 Parent(s): 60263a2

Fix: correct max_tokens key and simplify prompt logic

Files changed (1):
  summarizer.py  +1 -1
summarizer.py CHANGED
@@ -54,7 +54,7 @@ def summarize_text(text, mode, model_name="anthropic/claude-3-haiku", lang_mode=
         "messages": [
             {"role": "user", "content": build_prompt(text, mode, lang_mode, is_table)}
         ],
-        "max_tokens": 800
+        "max_tokens": 1300
     }

     try:
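
For context, a minimal sketch of how the changed payload might sit inside summarize_text, assuming the request is POSTed to an OpenRouter-style chat-completions endpoint (the "anthropic/claude-3-haiku" model ID suggests OpenRouter). Only the hunk above is actual repository code; the endpoint URL, API-key handling, response parsing, and the build_prompt stub below are assumptions for illustration. Raising max_tokens from 800 to 1300 gives the model more room before long summaries get truncated.

import os
import requests

def build_prompt(text, mode, lang_mode, is_table):
    # Placeholder: the real build_prompt is defined elsewhere in summarizer.py
    # and is not part of this diff.
    return f"Summarize ({mode}, {lang_mode}, table={is_table}):\n\n{text}"

def summarize_text(text, mode, model_name="anthropic/claude-3-haiku", lang_mode=None, is_table=False):
    payload = {
        "model": model_name,
        "messages": [
            {"role": "user", "content": build_prompt(text, mode, lang_mode, is_table)}
        ],
        # Raised from 800 to 1300 in this commit so longer summaries are not cut off.
        "max_tokens": 1300
    }

    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",  # assumed endpoint for OpenRouter model IDs
            headers={"Authorization": f"Bearer {os.environ.get('OPENROUTER_API_KEY', '')}"},  # hypothetical key handling
            json=payload,
            timeout=60,
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except requests.RequestException as exc:
        return f"Summarization request failed: {exc}"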