infinitymatter committed on
Commit
2a2caa1
·
verified ·
1 Parent(s): 892631c

Update src/models.py

Browse files
Files changed (1) hide show
  1. src/models.py +4 -16
src/models.py CHANGED
@@ -18,28 +18,16 @@ claude = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=anthr
18
 
19
def get_gpt_completion(prompt, system_message):
    """Return a text completion from the module-level `openai` InferenceClient.

    Args:
        prompt: The user prompt to complete.
        system_message: Instructions prepended to the prompt (folded into the
            prompt text, since `text_generation` has no chat/system role).

    Returns:
        The generated text as a plain string.

    Raises:
        Exception: Re-raises any error from the inference call after logging.
    """
    try:
        # `InferenceClient.text_generation` takes a single prompt string and
        # returns a str — it does NOT accept `model=`/`messages=` chat kwargs,
        # and its result has no `.choices` attribute (that is the OpenAI SDK
        # response shape). Fold the system message into the prompt instead.
        response = openai.text_generation(
            prompt=f"{system_message}\n{prompt}",
            max_new_tokens=200,
        )
        return response
    except Exception as e:
        print(f"GPT error: {e}")
        raise
33
 
34
def get_claude_completion(prompt, system_message):
    """Return a text completion from the module-level `claude` InferenceClient.

    Args:
        prompt: The user prompt to complete.
        system_message: Instructions prepended to the prompt (folded into the
            prompt text, since `text_generation` has no chat/system role).

    Returns:
        The generated text as a plain string.

    Raises:
        Exception: Re-raises any error from the inference call after logging.
    """
    try:
        # Bugs fixed: the original passed the client object itself as `model=`,
        # used Anthropic-SDK-only kwargs (`system=`, `messages=`) that
        # `text_generation` rejects, and returned `result.content[0].text`
        # where `result` was never defined (guaranteed NameError).
        response = claude.text_generation(
            prompt=f"{system_message}\n{prompt}",
            max_new_tokens=200,
        )
        return response
    except Exception as e:
        print(f"Claude error: {e}")
        raise
 
18
 
19
def get_gpt_completion(prompt, system_message):
    """Return a text completion from the module-level `openai` InferenceClient.

    Args:
        prompt: The user prompt to complete.
        system_message: Instructions prepended to the prompt on its own line
            (text_generation has no separate system/chat roles).

    Returns:
        The generated text as a plain string.

    Raises:
        Exception: Re-raises any error from the inference call after logging.
    """
    try:
        # text_generation returns the completion directly as a str.
        # NOTE(review): max_new_tokens=200 caps the reply length — confirm
        # this is enough for the intended use.
        response = openai.text_generation(
            prompt=f"{system_message}\n{prompt}",
            max_new_tokens=200,
        )
        return response
    except Exception as e:
        print(f"GPT error: {e}")
        raise
26
 
27
def get_claude_completion(prompt, system_message):
    """Return a text completion from the module-level `claude` InferenceClient.

    Args:
        prompt: The user prompt to complete.
        system_message: Instructions prepended to the prompt on its own line
            (text_generation has no separate system/chat roles).

    Returns:
        The generated text as a plain string.

    Raises:
        Exception: Re-raises any error from the inference call after logging.
    """
    try:
        # text_generation returns the completion directly as a str.
        # NOTE(review): max_new_tokens=200 caps the reply length — confirm
        # this is enough for the intended use.
        response = claude.text_generation(
            prompt=f"{system_message}\n{prompt}",
            max_new_tokens=200,
        )
        return response
    except Exception as e:
        print(f"Claude error: {e}")
        raise