DeepLearning101 committed on
Commit
7631ee8
·
verified ·
1 Parent(s): 9751680

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -31
app.py CHANGED
@@ -8,7 +8,7 @@ LLM_URL = os.environ.get("LLM_URL")
8
 
9
  USER_ID = "HuggingFace Space" # Placeholder user ID
10
 
11
- def send_chat_message(LLM_URL, LLM_API, user_input):
12
  payload = {
13
  "inputs": {},
14
  "query": user_input,
@@ -17,40 +17,56 @@ def send_chat_message(LLM_URL, LLM_API, user_input):
17
  "user": USER_ID,
18
  }
19
  print("Sending chat message payload:", payload) # Debug information
20
- response = requests.post(
21
- url=f"{LLM_URL}/chat-messages",
22
- headers={"Authorization": f"Bearer {LLM_API}"},
23
- json=payload,
24
- stream=True # Enable streaming
25
- )
26
- if response.status_code == 404:
27
- return "Error: Endpoint not found (404)"
28
-
29
- # Handle the stream of events
30
- last_thought = None
31
- try:
32
- for line in response.iter_lines(decode_unicode=True):
33
- if line:
34
- try:
35
- data = json.loads(line.split("data: ")[1])
36
- if data.get("event") == "agent_thought":
37
- last_thought = data.get("thought")
38
- except (IndexError, json.JSONDecodeError):
39
- continue
40
- except json.JSONDecodeError:
41
- return "Error: Invalid JSON response"
42
-
43
- if last_thought:
44
- # Structure the thought text
45
- return last_thought.strip()
46
- else:
47
- return "Error: No thought found in the response"
48
 
49
- def handle_input(user_input):
50
- chat_response = send_chat_message(LLM_URL, LLM_API, user_input)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  print("Chat response:", chat_response) # Debug information
52
  return chat_response
53
 
 
 
 
 
 
 
 
54
  # Define Gradio interface
55
  user_input = gr.Textbox(label='請輸入您想查詢的關鍵公司名稱')
56
  examples = [
 
8
 
9
  USER_ID = "HuggingFace Space" # Placeholder user ID
10
 
11
+ async def send_chat_message(LLM_URL, LLM_API, user_input):
12
  payload = {
13
  "inputs": {},
14
  "query": user_input,
 
17
  "user": USER_ID,
18
  }
19
  print("Sending chat message payload:", payload) # Debug information
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
+ async with aiohttp.ClientSession() as session:
22
+ try:
23
+ async with session.post(
24
+ url=f"{LLM_URL}/chat-messages",
25
+ headers={"Authorization": f"Bearer {LLM_API}"},
26
+ json=payload,
27
+ timeout=aiohttp.ClientTimeout(total=60)
28
+ ) as response:
29
+ if response.status != 200:
30
+ print(f"Error: {response.status}")
31
+ return f"Error: {response.status}"
32
+
33
+ full_response = []
34
+ async for line in response.content:
35
+ line = line.decode('utf-8').strip()
36
+ if not line:
37
+ continue
38
+ if "data: " not in line:
39
+ continue
40
+ try:
41
+ print("Received line:", line) # Debug information
42
+ data = json.loads(line.split("data: ")[1])
43
+ if "answer" in data:
44
+ full_response.append(data["answer"])
45
+ except (IndexError, json.JSONDecodeError) as e:
46
+ print(f"Error parsing line: {line}, error: {e}") # Debug information
47
+ continue
48
+
49
+ if full_response:
50
+ return ''.join(full_response).strip()
51
+ else:
52
+ return "Error: No response found in the response"
53
+ except Exception as e:
54
+ print(f"Exception: {e}")
55
+ return f"Exception: {e}"
56
+
57
async def handle_input(user_input):
    """Forward a user query to the LLM backend and return its reply.

    Thin async wrapper around send_chat_message using the module-level
    LLM_URL / LLM_API credentials.
    """
    print(f"Handling input: {user_input}")
    reply = await send_chat_message(LLM_URL, LLM_API, user_input)
    print("Chat response:", reply)  # Debug information
    return reply
62
 
63
def run_sync(func, *args):
    """Run an async function to completion from synchronous code.

    Needed because Gradio callbacks are invoked outside any running
    event loop.

    Args:
        func: A coroutine function (``async def``).
        *args: Positional arguments forwarded to ``func``.

    Returns:
        Whatever the awaited coroutine returns.
    """
    # asyncio.run creates, runs and ALWAYS closes a fresh event loop,
    # including when func raises. The previous manual
    # new_event_loop / run_until_complete / close sequence leaked the
    # loop on exception because close() was never reached.
    return asyncio.run(func(*args))
69
+
70
  # Define Gradio interface
71
  user_input = gr.Textbox(label='請輸入您想查詢的關鍵公司名稱')
72
  examples = [