ruslanmv committed
Commit c1c0b76 · verified · Parent: 5a48c62

Update app.py

Files changed (1): app.py (+7 -2)
app.py CHANGED
@@ -62,6 +62,9 @@ def parse_cv(file, job_description):
     if text.startswith("Error"):
         return text  # Return extraction error if any.
 
+    # Print the extracted CV text
+    print("Extracted CV text (before sending to LLM):\n", text)
+
     prompt = (
         f"Analyze the following CV against the provided job description. "
         f"Provide a summary, an assessment of fit, and a score from 0 to 10.\n\n"
@@ -70,7 +73,8 @@ def parse_cv(file, job_description):
     )
 
     try:
-        response = client.text_generation(prompt, max_tokens=512)
+        # Use 'max_new_tokens' instead of 'max_tokens'
+        response = client.text_generation(prompt, max_new_tokens=512)
     except Exception as e:
         return f"Error during CV analysis: {e}"
 
@@ -90,9 +94,10 @@ def respond(message, history: list[tuple[str, str]], system_message, max_tokens,
     response = ""
     try:
         # Stream response tokens from the chat completion endpoint.
+        # Replace 'max_tokens' with 'max_new_tokens'
        for message_chunk in client.chat_completion(
             messages,
-            max_tokens=max_tokens,
+            max_new_tokens=max_tokens,
             stream=True,
             temperature=temperature,
             top_p=top_p,
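
For reference, a minimal standalone sketch of the corrected text_generation call, assuming a huggingface_hub.InferenceClient; the model id below is illustrative, and app.py constructs its own client elsewhere:

from huggingface_hub import InferenceClient

# Hypothetical model id, for illustration only.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

prompt = "Analyze the following CV against the provided job description. ..."

# text_generation expects `max_new_tokens` (tokens to generate beyond the
# prompt); it does not accept a `max_tokens` keyword, which raises a TypeError.
response = client.text_generation(prompt, max_new_tokens=512)
print(response)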
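
And a similar sketch for consuming the streamed chat completion, assuming the same client; the choices[0].delta.content access follows the chunk schema yielded by chat_completion(stream=True), and the token-limit keyword is left out to keep the sketch version-agnostic:

messages = [{"role": "user", "content": "Summarize this CV."}]

response = ""
for message_chunk in client.chat_completion(
    messages,
    stream=True,
    temperature=0.7,
    top_p=0.95,
):
    # Each chunk carries an incremental delta; content may be None on the
    # final chunk, so guard before appending.
    token = message_chunk.choices[0].delta.content
    if token:
        response += token
print(response)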