Spaces:
Sleeping
Sleeping
File size: 1,992 Bytes
075f9e9 ff0e3f2 075f9e9 3f68a6f 075f9e9 739af6c 075f9e9 739af6c 075f9e9 739af6c 075f9e9 70b3f00 ff0e3f2 70b3f00 ff0e3f2 075f9e9 3f68a6f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
def chat_stream(self, message, history):
    """Stream an assistant reply for *message*, resolving tool calls first.

    Generator: yields the progressively-accumulated response text (suitable
    for a streaming chat UI), then a final yield that includes the follow-up
    footer. Side effects: appends the user message and the final assistant
    text to ``self.session_log`` and persists it via ``self.save_session_log()``.

    Args:
        message: The new user message (str).
        history: Prior turns; only dicts with role "user"/"assistant" are kept
            (assumes OpenAI-style {"role", "content"} dicts — confirm caller).

    Yields:
        str: The full response text accumulated so far.
    """
    messages = [{"role": "system", "content": self.system_prompt()}]
    # Keep only well-formed user/assistant turns from history.
    for msg in history:
        if isinstance(msg, dict) and msg.get("role") in ["user", "assistant"]:
            messages.append(msg)
    messages.append({"role": "user", "content": message})
    self.session_log.append({"role": "user", "content": message})

    # Non-streamed probe call: tool calls cannot be detected mid-stream, so
    # check for them first and resolve before producing the streamed answer.
    response = self.openai.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=tools,
        stream=False,  # probe for tool calls
    )
    reply = response.choices[0].message
    if reply.tool_calls:
        # Run the requested tools and fold the assistant tool-call message
        # plus the tool results back into the conversation.
        tool_results = self.handle_tool_call(reply.tool_calls)
        messages.append(reply)
        messages.extend(tool_results)

    # Single streamed call for the final answer. Both the tool-call and the
    # plain path converge here (the original duplicated this block verbatim).
    stream = self.openai.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=tools,
        stream=True,
    )
    full_response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta
        if hasattr(delta, "content") and delta.content:
            full_response += delta.content
            yield full_response

    # Always add the follow-up footer — and YIELD it, so streaming consumers
    # actually display it (previously it was only written to the log).
    full_response += "\n\n💬 Let me know if you’d like to follow up or need help connecting with Jacob."
    yield full_response
    self.session_log.append({"role": "assistant", "content": full_response})
    self.save_session_log()
|