init!
backend.py +5 -5
backend.py CHANGED
@@ -4,12 +4,12 @@ import io
 import requests
 import html # For escaping HTML characters
 from bs4 import BeautifulSoup
-from openai import OpenAI
+from openai import OpenAI
 
 # Initialize OpenAI API with Nvidia's Mistral model
 client = OpenAI(
-    base_url="https://
-    api_key="
+    base_url="https://api.llama-api.com",
+    api_key="LA-38879bbd57704371a151ce66b6186aa6e95ab69a3b974f5ca4459f2ce89c4de1"
 )
 
 def clean_test_case_output(text):
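The first hunk repoints the stock openai client from the (truncated) Nvidia endpoint to llama-api.com's OpenAI-compatible API, hard-coding a live API key and leaving the "Nvidia's Mistral" comment stale. A minimal sketch of an equivalent but safer setup, assuming the official openai Python SDK and a hypothetical LLAMA_API_KEY environment variable:

import os
from openai import OpenAI

# Same OpenAI-compatible endpoint as the commit, but the key is read from the
# environment rather than being committed to version control.
client = OpenAI(
    base_url="https://api.llama-api.com",
    api_key=os.environ["LLAMA_API_KEY"],  # hypothetical variable name
)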
@@ -86,13 +86,13 @@ def generate_testcases(user_story):
     try:
         # Call the Nvidia Mistral API with the refined prompt
         completion = client.chat.completions.create(
-            model="
+            model="llama3.1-70b", # Using Mistral model
             messages=[
                 {"role": "user", "content": prompt}
             ],
             temperature=0.06, # Further lowering temperature for precise and deterministic output
             top_p=0.5, # Prioritize high-probability tokens even more
-
+            max_token=4096, # Increase max tokens to allow longer content
             stream=True # Streaming the response for faster retrieval
         )
 
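The second hunk fills in the model name and adds a token cap. Two details stand out: the inline comment still says "Mistral" although the model is now llama3.1-70b, and the official openai SDK spells the parameter max_tokens, so max_token=4096 as committed is most likely a typo that the SDK would reject as an unknown keyword argument. A hedged sketch of a working version of the call, reusing the client above with a placeholder prompt:

# Placeholder prompt; the real one is built from the user story in backend.py.
prompt = "Generate test cases for the following user story: ..."

completion = client.chat.completions.create(
    model="llama3.1-70b",
    messages=[{"role": "user", "content": prompt}],
    temperature=0.06,  # near-deterministic sampling
    top_p=0.5,         # restrict choices to high-probability tokens
    max_tokens=4096,   # note: max_tokens, not max_token
    stream=True,       # yield the response incrementally
)

# With stream=True the SDK returns an iterator of chunks; the generated text
# arrives in choices[0].delta.content and can be accumulated as it streams.
text = ""
for chunk in completion:
    if chunk.choices and chunk.choices[0].delta.content:
        text += chunk.choices[0].delta.content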