Update app.py
app.py CHANGED
@@ -286,85 +286,32 @@ def format_kadi_api_doc_context(docs):
 
 
 def rag_workflow(query):
-
-        - Document 1: Provides information on how to use a Python library to interact with the HTTP API of 'Kadi4Mat'.
-        - Document 2: Provides information on how to use a Python library to implement custom CLI commands to interact with 'Kadi4Mat'.
-
-        Your task is to select the single most likely option.
-        If Document 1 is the best choice, respond with 'kadi-apy python library'.
-        If Document 2 is the best choice, respond with 'kadi-apy python cli library'.
-        Respond with only the exact corresponding option and do not include any additional comments, explanations, or text."
-        """
-    )
-    library_usage_prediction = llm.predict(prompt)
-
-    print("METADATA PREDICTION -------------------------:", library_usage_prediction)
-    print(library_usage_prediction)
-
-    rewritten_query_response = llm.invoke(rewrite_prompt)
-    rewritten_query = rewritten_query_response.content.strip()
-
-    print("Predicted library to search in", library_usage_prediction)
-    print("Rewritten query:", rewritten_query)
-
-    kadi_apy_docs = vector_store.similarity_search(query, k=5, filter={"usage": "doc"})
-    kadi_apy_sourcecode = vector_store.similarity_search(query, k=5, filter={"usage": library_usage_prediction})
-
-    doc_context = format_kadi_api_doc_context(kadi_apy_docs)
-    code_context = format_kadi_apy_library_context(kadi_apy_sourcecode)
-
-    print("H")
-    print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
-    for doc in kadi_apy_sourcecode:
-        print(doc.metadata.get("source", "Unknown Type"))
-    print("\n")
-    print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
-    print("CODE_CONTEST\n:", code_context)
-
-    print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
-    for doc in kadi_apy_docs:
-        print(doc.metadata.get("source", "Unknown Type"))
-    print("\n")
-    print("::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::")
-
-    print(doc_context)
-    print("DOC_CONTEXT\n:", code_context)
-
-    prompt = f"""You are an expert python developer. You are assisting in generating code for users who wants to make use of "kadi-apy", an API library.
-    "Doc-context:" provides you with information how to use this API library by givnig code examples and code documentation.
-    "Code-context:" provides you information of API methods and classes from the "kadi-apy" library.
-    Based on the retrieved contexts and the guidelines answer the query.
-
-    - First display the full code and then follow with a well structured explanation of the generated code.
-
-    {code_context}
-
-    Query:
-    {query}
-    """
-
-    response = llm.invoke(prompt)
-    return response.content
+    """
+    RAGChain class to perform the complete RAG workflow.
+    """
+    # Assume 'llm' and 'vector_store' are already initialized instances
+    rag_chain = RAGChain(llm, vector_store)
+
+    # Step 1: Predict which library usage is relevant
+    library_usage_prediction = rag_chain.predict_library_usage(query)
+    print(f"Predicted library usage: {library_usage_prediction}")
+
+    # Step 2: Retrieve contexts (documents and code snippets)
+    doc_contexts, code_contexts = rag_chain.retrieve_contexts(query, library_usage_prediction)
+    print("Retrieved Document Contexts:", doc_contexts)
+    print("Retrieved Code Contexts:", code_contexts)
+
+    # Step 3: Format the contexts
+    formatted_doc_context, formatted_code_context = rag_chain.format_context(doc_contexts, code_contexts)
+    print("Formatted Document Contexts:", formatted_doc_context)
+    print("Formatted Code Contexts:", formatted_code_context)
+
+    # Step 4: Generate the final response
+    response = rag_chain.generate_response(query, formatted_doc_context, formatted_code_context)
+    print("Generated Response:", response)
+
+    return response
 
 
 def get_chroma_vectorstore2(embedding_model):
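
The rewritten rag_workflow() delegates each step to a RAGChain class that does not appear in this diff. For orientation only, below is a minimal sketch of what such a class could look like. It is inferred from the four calls in the new code and from the inline logic that was removed, assuming the same LangChain-style interfaces the old code used (llm.invoke() returning a message with a .content attribute, and vector_store.similarity_search() accepting a metadata filter); the method names match the calls above, but the bodies are assumptions, not the actual implementation.

class RAGChain:
    """Hypothetical reconstruction -- not part of the diff above."""

    def __init__(self, llm, vector_store):
        self.llm = llm
        self.vector_store = vector_store

    def predict_library_usage(self, query):
        # Mirrors the routing prompt removed above: the LLM decides which
        # corpus the query most likely concerns.
        prompt = (
            "Decide which document best matches the query below.\n"
            "- Document 1: Using a Python library to interact with the HTTP API of 'Kadi4Mat'.\n"
            "- Document 2: Using a Python library to implement custom CLI commands for 'Kadi4Mat'.\n"
            "If Document 1 is the best choice, respond with 'kadi-apy python library'.\n"
            "If Document 2 is the best choice, respond with 'kadi-apy python cli library'.\n"
            "Respond with only the exact corresponding option.\n"
            f"Query: {query}"
        )
        return self.llm.invoke(prompt).content.strip()

    def retrieve_contexts(self, query, library_usage_prediction):
        # Same two similarity searches as the removed code: documentation
        # chunks, plus source-code chunks filtered by the predicted usage tag.
        doc_contexts = self.vector_store.similarity_search(
            query, k=5, filter={"usage": "doc"}
        )
        code_contexts = self.vector_store.similarity_search(
            query, k=5, filter={"usage": library_usage_prediction}
        )
        return doc_contexts, code_contexts

    def format_context(self, doc_contexts, code_contexts):
        # Collapse the retrieved Document objects into plain-text blocks.
        doc_text = "\n\n".join(doc.page_content for doc in doc_contexts)
        code_text = "\n\n".join(doc.page_content for doc in code_contexts)
        return doc_text, code_text

    def generate_response(self, query, doc_context, code_context):
        # Final generation prompt, following the structure of the removed one.
        prompt = (
            'You are an expert Python developer assisting users of "kadi-apy", an API library.\n'
            f"Doc-context:\n{doc_context}\n\n"
            f"Code-context:\n{code_context}\n\n"
            "First display the full code, then follow with a well-structured explanation of the generated code.\n"
            f"Query: {query}"
        )
        return self.llm.invoke(prompt).content

Grouping routing, retrieval, formatting, and generation behind one interface also drops the walls of separator prints and the unused rewritten_query path from the workflow function itself.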