Update app.py
app.py CHANGED

@@ -501,7 +501,10 @@ def get_response_from_llama(query, model, selected_docs, file_type, num_calls=1,
         # Truncate context
         context = truncate_context(context)
 
-
+        messages = [
+            {"role": "system", "content": system_instruction},
+            {"role": "user", "content": f"Based on the following data extracted from Excel spreadsheets:\n{context}\n\nPlease provide the Python code needed to execute the following task: '{query}'. Ensure that the code is derived directly from the dataset. If a chart is requested, use the matplotlib library to generate the appropriate visualization."}
+        ]
 
     elif file_type == "pdf":
         # PDF functionality
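The excel branch now sends an explicit chat-style message list: a system instruction plus a user turn that embeds the truncated spreadsheet context and the task. truncate_context itself is defined outside this diff; the sketch below shows one plausible implementation, assuming a simple character budget (the constant and the cut marker are illustrative, not taken from app.py):

MAX_CONTEXT_CHARS = 12000  # hypothetical budget, roughly 3000 tokens

def truncate_context(context: str, max_chars: int = MAX_CONTEXT_CHARS) -> str:
    """Trim the context so the assembled prompt stays within the model's input limit."""
    if len(context) <= max_chars:
        return context
    # Keep the start of the data and mark the cut point explicitly.
    return context[:max_chars] + "\n[... context truncated ...]"

Truncating before the f-string interpolation keeps the fixed prompt scaffolding intact; only the spreadsheet data is shortened when the limit bites.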
@@ -520,7 +523,10 @@ def get_response_from_llama(query, model, selected_docs, file_type, num_calls=1,
         Your goal is to provide accurate, detailed, and precise summaries based on the context provided.
         Avoid making assumptions or adding information that is not explicitly supported by the context from the PDF documents."""
 
-
+        messages = [
+            {"role": "system", "content": system_instruction},
+            {"role": "user", "content": f"Using the following context from the PDF documents:\n{context_str}\n\nPlease generate a step-by-step reasoning before arriving at a comprehensive and accurate summary addressing the following question: '{query}'. Ensure your response is strictly based on the provided context, highlighting key metrics, trends, and significant details relevant to the query. Avoid any speculative or unverified information."}
+        ]
 
     else:
         raise ValueError("Invalid file type. Use 'excel' or 'pdf'.")
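The pdf branch builds the same two-message structure but interpolates context_str, which this diff does not define; it is presumably assembled earlier in the function from the user's selected documents. A hypothetical sketch of that assembly, assuming each retrieved chunk carries text and source fields (all names here are illustrative):

def build_context_str(selected_docs, retrieved_chunks):
    """Join the text of chunks whose source document was selected by the user."""
    parts = [chunk["text"] for chunk in retrieved_chunks
             if chunk["source"] in selected_docs]
    return "\n\n".join(parts)

Joining with blank lines preserves chunk boundaries without introducing markup that could confuse the model.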
@@ -530,7 +536,7 @@ def get_response_from_llama(query, model, selected_docs, file_type, num_calls=1,
     try:
         # Generate content with streaming enabled
         for response in client.chat_completion(
-
+            messages=messages,  # Pass messages in the required format
             max_new_tokens=1000,  # Reduced to ensure we stay within token limits
             temperature=temperature,
             stream=True,
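With messages=messages supplied, chat_completion streams the reply as OpenAI-style chunks. A minimal sketch of consuming such a stream with huggingface_hub's InferenceClient (the model id and message contents are illustrative, and recent huggingface_hub releases name the length parameter max_tokens rather than max_new_tokens):

from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")  # illustrative model id

messages = [
    {"role": "system", "content": "You are a careful data analyst."},  # illustrative
    {"role": "user", "content": "Summarize the provided context."},
]

full_response = ""
for chunk in client.chat_completion(
    messages=messages,
    max_tokens=1000,   # cap on generated tokens
    temperature=0.3,
    stream=True,
):
    delta = chunk.choices[0].delta.content  # incremental piece of text, may be None
    if delta:
        full_response += delta

Each streamed chunk exposes choices[0].delta.content, so accumulating the non-empty deltas reconstructs the complete response as it arrives.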