Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -11,9 +11,6 @@ import numpy as np
|
|
11 |
import docx
|
12 |
from groq import Groq
|
13 |
import PyPDF2
|
14 |
-
from groq import Groq
|
15 |
-
from groq.error import GroqError
|
16 |
-
|
17 |
|
18 |
# --- Document Loaders ---
|
19 |
def extract_text_from_pdf(pdf_path):
|
@@ -52,27 +49,56 @@ def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
|
|
52 |
D, I = index.search(np.array([question_embedding]), k)
|
53 |
return [text_chunks[i] for i in I[0]]
|
54 |
|
55 |
-
def generate_answer_with_groq(question, context, retries=3, delay=2):
|
56 |
-
prompt = f"Based on the following context, answer the question: '{question}'\n\nContext:\n{context}"
|
57 |
-
groq_client = Groq(api_key=os.environ["GROQ_API_KEY"])
|
58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
for attempt in range(retries):
|
60 |
try:
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
)
|
68 |
-
return response.choices[0].message.content
|
69 |
-
except GroqError as e:
|
70 |
-
if "503" in str(e) and attempt < retries - 1:
|
71 |
-
time.sleep(delay)
|
72 |
continue
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
|
77 |
# --- Twilio Chat Handlers ---
|
78 |
def fetch_latest_incoming_message(account_sid, auth_token, conversation_sid):
|
|
|
11 |
import docx
|
12 |
from groq import Groq
|
13 |
import PyPDF2
|
|
|
|
|
|
|
14 |
|
15 |
# --- Document Loaders ---
|
16 |
def extract_text_from_pdf(pdf_path):
|
|
|
49 |
D, I = index.search(np.array([question_embedding]), k)
|
50 |
return [text_chunks[i] for i in I[0]]
|
51 |
|
52 |
+
#def generate_answer_with_groq(question, context, retries=3, delay=2):
|
53 |
+
# prompt = f"Based on the following context, answer the question: '{question}'\n\nContext:\n{context}"
|
54 |
+
# groq_client = Groq(api_key=os.environ["GROQ_API_KEY"])
|
55 |
|
56 |
+
# for attempt in range(retries):
|
57 |
+
# try:
|
58 |
+
# response = groq_client.chat.completions.create(
|
59 |
+
# model="llama3-8b-8192",
|
60 |
+
# messages=[
|
61 |
+
# {"role": "system", "content": "You are an AI Assistant for Small Businesses."},
|
62 |
+
# {"role": "user", "content": prompt},
|
63 |
+
# ]
|
64 |
+
# )
|
65 |
+
# return response.choices[0].message.content
|
66 |
+
# except GroqError as e:
|
67 |
+
# if "503" in str(e) and attempt < retries - 1:
|
68 |
+
# time.sleep(delay)
|
69 |
+
# continue
|
70 |
+
# else:
|
71 |
+
# return f"⚠️ Groq API Error: {str(e)}"
|
72 |
+
|
73 |
+
#-----------------------------------------
|
74 |
+
def generate_answer_with_groq(question, context, retries=3, delay=2):
    """Answer *question* using the Groq chat-completions REST endpoint, grounded in *context*.

    Args:
        question: The user's question.
        context: Retrieved document text the answer should be based on.
        retries: Maximum number of attempts on transient (503) failures.
        delay: Seconds to sleep between retry attempts.

    Returns:
        The model's answer string on success, or a "⚠️ Groq API Error: ..."
        message string on failure (callers treat the return as displayable text).
    """
    url = "https://api.groq.com/openai/v1/chat/completions"
    api_key = os.environ["GROQ_API_KEY"]
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    # BUG FIX: the previous version referenced `prompt` in the payload without
    # ever defining it (NameError on every call) — the definition only existed
    # in the commented-out predecessor. Rebuild it the same way here.
    prompt = (
        f"Based on the following context, answer the question: '{question}'"
        f"\n\nContext:\n{context}"
    )
    payload = {
        "model": "llama3-8b-8192",
        "messages": [
            {"role": "system", "content": "You are an AI Assistant for Small Businesses."},
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.5,
        "max_tokens": 300,
    }
    for attempt in range(retries):
        try:
            # BUG FIX: a timeout prevents an unresponsive API from hanging the
            # app, and raise_for_status() turns an HTTP 503 into an exception —
            # without it the 503 body has no 'choices' key and the retry branch
            # below could never trigger.
            response = requests.post(url, headers=headers, json=payload, timeout=30)
            response.raise_for_status()
            result = response.json()
            return result['choices'][0]['message']['content'].strip()
        # BUG FIX: the previous `except GroqError` was dead code twice over —
        # the `from groq.error import GroqError` import was removed in this
        # change (so the name itself is a NameError), and raw `requests` calls
        # raise requests.exceptions.RequestException, never GroqError.
        except requests.exceptions.RequestException as e:
            if "503" in str(e) and attempt < retries - 1:
                time.sleep(delay)
                continue
            return f"⚠️ Groq API Error: {str(e)}"
    # Defensive fall-through: only reachable when retries <= 0; the previous
    # version implicitly returned None here.
    return "⚠️ Groq API Error: no attempts were made"
|
101 |
+
#-----------------------------------------
|
102 |
|
103 |
# --- Twilio Chat Handlers ---
|
104 |
def fetch_latest_incoming_message(account_sid, auth_token, conversation_sid):
|