FlawedLLM
committed
Update app.py
app.py CHANGED

@@ -91,7 +91,7 @@ def chunk_it(input_command, item_list):
 ### Response:
 {}"""
 if item_list is not None:
-  item_list = f'''The ItemName should be
+  item_list = f'''The ItemName should be chosen from the given list : {item_list} , except when adding item. If ItemName does not find anything SIMILAR in the list, then the ItemName should be "Null" '''
 inputs = tokenizer(
 [
 alpaca_prompt.format(
@@ -135,7 +135,7 @@ def chunk_it(input_command, item_list):
 ReportType (string: "profit", "revenue", "inventory", or Null for all reports)

 {item_list}
-
+
 ALWAYS provide output in a JSON format.''', # instruction
 input_command, # input
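For context, here is a minimal, self-contained sketch of the prompt flow the diff touches: the item_list constraint string is built, dropped into the Alpaca-style instruction, and the formatted prompt is tokenized. The full alpaca_prompt template, the "gpt2" placeholder checkpoint, and the empty response slot are assumptions for illustration; only the fragments visible in the diff come from app.py itself.

# Minimal sketch of the flow around the changed line (assumptions noted above).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint, not the app's model

# Assumed three-slot Alpaca template; the diff only shows its "### Response:" tail.
alpaca_prompt = """### Instruction:
{}

### Input:
{}

### Response:
{}"""

def chunk_it(input_command, item_list):
    # The commit's change: constrain ItemName to the supplied list, except when
    # adding an item; anything with no similar match should become "Null".
    if item_list is not None:
        item_list = f'''The ItemName should be chosen from the given list : {item_list} , except when adding item. If ItemName does not find anything SIMILAR in the list, then the ItemName should be "Null" '''

    instruction = f'''ReportType (string: "profit", "revenue", "inventory", or Null for all reports)

{item_list}

ALWAYS provide output in a JSON format.'''

    # Tokenize the fully formatted prompt; the response slot is left empty for generation.
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                instruction,    # instruction
                input_command,  # input
                "",             # response (to be generated)
            )
        ],
        return_tensors="pt",
    )
    return inputs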