Update app.py
app.py CHANGED
@@ -11,6 +11,7 @@ import numpy as np
 import docx
 from groq import Groq
 import PyPDF2
+import requests
 
 # --- Document Loaders ---
 def extract_text_from_pdf(pdf_path):
@@ -71,13 +72,16 @@ def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
     # return f"⚠️ Groq API Error: {str(e)}"
 
 #-----------------------------------------
+import requests  # ✅ Add this at the top of your file
+
 def generate_answer_with_groq(question, context, retries=3, delay=2):
     url = "https://api.groq.com/openai/v1/chat/completions"
-    api_key=os.environ["GROQ_API_KEY"]
+    api_key = os.environ["GROQ_API_KEY"]
     headers = {
         "Authorization": f"Bearer {api_key}",
         "Content-Type": "application/json",
     }
+    prompt = f"Based on the following context, answer the question: '{question}'\n\nContext:\n{context}"
     payload = {
         "model": "llama3-8b-8192",
         "messages": [
@@ -87,17 +91,19 @@ def generate_answer_with_groq(question, context, retries=3, delay=2):
         "temperature": 0.5,
         "max_tokens": 300,
     }
+
     for attempt in range(retries):
         try:
-
-
-
-
-
-
+            response = requests.post(url, headers=headers, json=payload)
+            result = response.json()
+            return result['choices'][0]['message']['content'].strip()
+        except Exception as e:
+            if "503" in str(e) and attempt < retries - 1:
+                time.sleep(delay)
                 continue
-
-
+            else:
+                return f"⚠️ Groq API Error: {str(e)}"
+
 #-----------------------------------------
 
 # --- Twilio Chat Handlers ---
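Read as a whole, the patched function looks roughly like the sketch below. Two details are assumptions, since they are not visible in the hunks above: the shape of the "messages" list (here it simply wraps the new prompt variable as a single user message), and the response.raise_for_status() call, which this sketch adds so that a 503 response actually raises an exception whose text contains "503"; as committed, the retry branch only fires if requests itself throws an error mentioning 503.

import os
import time  # assumed to already be imported at the top of app.py, since the patch calls time.sleep

import requests


def generate_answer_with_groq(question, context, retries=3, delay=2):
    url = "https://api.groq.com/openai/v1/chat/completions"
    api_key = os.environ["GROQ_API_KEY"]
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    prompt = f"Based on the following context, answer the question: '{question}'\n\nContext:\n{context}"
    payload = {
        "model": "llama3-8b-8192",
        "messages": [
            {"role": "user", "content": prompt},  # assumed message shape; not shown in the diff
        ],
        "temperature": 0.5,
        "max_tokens": 300,
    }

    for attempt in range(retries):
        try:
            response = requests.post(url, headers=headers, json=payload)
            # Added in this sketch only: without raise_for_status(), a 503
            # reply would not raise, so the retry branch below would never run.
            response.raise_for_status()
            result = response.json()
            return result["choices"][0]["message"]["content"].strip()
        except Exception as e:
            if "503" in str(e) and attempt < retries - 1:
                time.sleep(delay)
                continue
            return f"⚠️ Groq API Error: {str(e)}"

The call site is unchanged by this commit: the Twilio chat handlers further down the file can presumably keep calling generate_answer_with_groq(question, context) and treat the returned string as the reply text.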