Update backend.py
backend.py (+3, -3)
@@ -34,7 +34,7 @@ def generate_testcases(user_story):
     # Few-shot learning examples to guide the model
     few_shot_examples = """

-    "App is Tech360 iOS ,generate minimum 10 testcases but can you can create more than 10 if possible with clear steps and
+    "App is Tech360 iOS. Generate a minimum of 10 testcases, but you can create more than 10 if possible, with clear steps, and understand the user story thoroughly."

     Example 1:
     User Story:
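For orientation, here is a minimal sketch of how few_shot_examples might be stitched into the final prompt; the build_prompt helper, its template text, and the placement of the user story are assumptions for illustration, since the commit shows only this fragment of generate_testcases.

# Hypothetical helper, not taken from backend.py: combines the few-shot
# block with a new user story into a single prompt string.
def build_prompt(user_story: str, few_shot_examples: str) -> str:
    return (
        few_shot_examples
        + "\nNow generate testcases for the following user story:\n"
        + "User Story:\n"
        + user_story
    )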
@@ -93,8 +93,8 @@ def generate_testcases(user_story):
         messages=[
             {"role": "user", "content": prompt}
         ],
-        temperature=0
-        top_p=0.
+        temperature=1.0,  # Keep the default temperature; top_p below constrains sampling instead
+        top_p=0.7,  # Prioritize high-probability tokens even more
         max_tokens=4096,  # Increase max tokens to allow longer content
         stream=True  # Streaming the response for faster retrieval
     )
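The messages / temperature / top_p / stream keyword arguments match the OpenAI-style chat-completions interface, so the surrounding call probably resembles the sketch below; the client setup, the model name, and the chunk-accumulation loop are assumptions, since the commit shows only lines 93-100.

from openai import OpenAI

client = OpenAI()  # Assumption: an OpenAI-compatible client; backend.py's actual setup is not shown

def generate_testcases(user_story: str, few_shot_examples: str) -> str:
    prompt = build_prompt(user_story, few_shot_examples)  # helper sketched above
    stream = client.chat.completions.create(
        model="gpt-4o-mini",  # Placeholder; the commit does not reveal the model
        messages=[
            {"role": "user", "content": prompt}
        ],
        temperature=1.0,
        top_p=0.7,
        max_tokens=4096,  # Increase max tokens to allow longer content
        stream=True,      # Streaming the response for faster retrieval
    )
    # Accumulate streamed deltas into the final testcase text
    parts = []
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            parts.append(delta)
    return "".join(parts)

One design note: temperature and top_p both control sampling randomness, and the OpenAI documentation recommends adjusting one or the other rather than both; with temperature left at the default of 1.0, the top_p=0.7 setting is what actually narrows the output here.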