Spaces:
Running
Running
Update webchat.py
Browse files — webchat.py (+12 −6)
webchat.py
CHANGED
@@ -21,20 +21,24 @@ from utils import chromadb_client
|
|
21 |
# this approach for the ease of demo setup. In a production application these variables
|
22 |
# can be stored in an .env or a properties file
|
23 |
|
24 |
-
|
25 |
-
url = "https://us-south.ml.cloud.ibm.com"
|
26 |
|
27 |
# These global variables will be updated in get_credentials() function
|
28 |
watsonx_project_id = ""
|
29 |
# Replace with your IBM Cloud key
|
30 |
api_key = ""
|
31 |
|
|
|
|
|
|
|
|
|
32 |
def get_credentials():
    """Refresh the module-level watsonx credentials from the environment.

    Loads variables from a local .env file (presumably via python-dotenv's
    ``load_dotenv`` — import not visible in this chunk) and copies the
    ``api_key`` and ``project_id`` environment values into the module-level
    ``api_key`` and ``watsonx_project_id`` globals used for authentication
    elsewhere in this file.
    """
    load_dotenv()
    # Write straight into the module namespace so the other helpers that
    # read these globals pick up the refreshed values.
    module_ns = globals()
    module_ns["api_key"] = os.getenv("api_key", None)
    module_ns["watsonx_project_id"] = os.getenv("project_id", None)
|
|
|
38 |
|
39 |
# The get_model function creates an LLM model object with the specified parameters
|
40 |
|
@@ -53,7 +57,7 @@ def get_model(model_type, max_tokens, min_tokens, decoding, temperature, top_k,
|
|
53 |
params=generate_params,
|
54 |
credentials={
|
55 |
"apikey": api_key,
|
56 |
-
"url":
|
57 |
},
|
58 |
project_id=watsonx_project_id
|
59 |
)
|
@@ -73,7 +77,7 @@ def get_model_test(model_type, max_tokens, min_tokens, decoding, temperature):
|
|
73 |
params=generate_params,
|
74 |
credentials={
|
75 |
"apikey": api_key,
|
76 |
-
"url":
|
77 |
},
|
78 |
project_id=watsonx_project_id
|
79 |
)
|
@@ -218,13 +222,15 @@ def main():
|
|
218 |
# question = "Can an EV be plugged in to a household outlet?"
|
219 |
collection_name = "test_web_RAG"
|
220 |
|
221 |
-
answer_questions_from_web(api_key, watsonx_project_id, url, question, collection_name,client)
|
222 |
|
223 |
|
224 |
-
def answer_questions_from_web(request_api_key, request_project_id, url, question, collection_name,client):
|
225 |
# Update the global variable
|
226 |
globals()["api_key"] = request_api_key
|
227 |
globals()["watsonx_project_id"] = request_project_id
|
|
|
|
|
228 |
|
229 |
# Specify model parameters
|
230 |
model_type = "meta-llama/llama-2-70b-chat"
|
|
|
21 |
# this approach for the ease of demo setup. In a production application these variables
|
22 |
# can be stored in an .env or a properties file
|
23 |
|
24 |
+
|
|
|
25 |
|
26 |
# Placeholders for the watsonx credentials. These module-level values are
# overwritten by get_credentials() (from the environment) or by
# answer_questions_from_web() (from the request) before use.
watsonx_project_id = ""
# IBM Cloud API key — replace with your own (or supply via the environment)
api_key = ""

# watsonx endpoint URL, e.g. "https://us-south.ml.cloud.ibm.com"
watsonx_url = ""
|
33 |
+
|
34 |
+
|
35 |
def get_credentials():
    """Refresh the module-level watsonx credentials from the environment.

    Loads variables from a local .env file (presumably via python-dotenv's
    ``load_dotenv`` — import not visible in this chunk) and copies the
    ``api_key``, ``project_id`` and ``watsonx_url`` environment values into
    the module-level ``api_key``, ``watsonx_project_id`` and ``watsonx_url``
    globals used for authentication elsewhere in this file.
    """
    load_dotenv()
    # Write straight into the module namespace so the other helpers that
    # read these globals pick up the refreshed values.
    module_ns = globals()
    module_ns["api_key"] = os.getenv("api_key", None)
    module_ns["watsonx_project_id"] = os.getenv("project_id", None)
    module_ns["watsonx_url"] = os.getenv("watsonx_url", None)
|
42 |
|
43 |
# The get_model function creates an LLM model object with the specified parameters
|
44 |
|
|
|
57 |
params=generate_params,
|
58 |
credentials={
|
59 |
"apikey": api_key,
|
60 |
+
"url": watsonx_url
|
61 |
},
|
62 |
project_id=watsonx_project_id
|
63 |
)
|
|
|
77 |
params=generate_params,
|
78 |
credentials={
|
79 |
"apikey": api_key,
|
80 |
+
"url": watsonx_url
|
81 |
},
|
82 |
project_id=watsonx_project_id
|
83 |
)
|
|
|
222 |
# question = "Can an EV be plugged in to a household outlet?"
|
223 |
collection_name = "test_web_RAG"
|
224 |
|
225 |
+
answer_questions_from_web(api_key, watsonx_project_id, watsonx_url,url, question, collection_name,client)
|
226 |
|
227 |
|
228 |
+
def answer_questions_from_web(request_api_key, request_project_id, request_watsonx_url,url, question, collection_name,client):
|
229 |
# Update the global variable
|
230 |
globals()["api_key"] = request_api_key
|
231 |
globals()["watsonx_project_id"] = request_project_id
|
232 |
+
globals()["watsonx_url"] = request_watsonx_url
|
233 |
+
|
234 |
|
235 |
# Specify model parameters
|
236 |
model_type = "meta-llama/llama-2-70b-chat"
|