fix random lock
- charles_actor.py +9 -8
- prompt_manager.py +17 -3
charles_actor.py
CHANGED
@@ -39,7 +39,7 @@ class CharlesActor:
         self.set_state("002 - creating EnvironmentStateActor")
         self._environment_state_actor = EnvironmentStateActor.remote()
 
-        self.set_state("003 - creating
+        self.set_state("003 - creating PromptManager")
         from prompt_manager import PromptManager
         self._prompt_manager = PromptManager()
 
@@ -101,7 +101,7 @@ class CharlesActor:
         is_talking = False
         has_spoken_for_this_prompt = False
 
-        while True:
+        while True:
            env_state = await self._environment_state_actor.begin_next_step.remote()
            self._environment_state = env_state
            audio_frames = await self._app_interface_actor.dequeue_audio_input_frames_async.remote()
@@ -142,17 +142,12 @@
                        line += f"[{speech_chunks_per_response[i]}] {response} \n"
                    if len(line) > 0:
                        await add_debug_output(line)
-                   current_responses = []
-                   speech_chunks_per_response = []
-                   env_state.llm_preview = ""
-                   env_state.llm_responses = []
-                   env_state.tts_raw_chunk_ids = []
                    human_preview_text = ""
                    robot_preview_text = ""
                    if additional_prompt is not None:
                        prompt = additional_prompt + ". " + prompt
                    await add_debug_output(f"👨 {prompt}")
-                   self._prompt_manager.
+                   self._prompt_manager.replace_or_append_user_message(prompt)
                    if self._respond_to_prompt_task is not None:
                        await self._respond_to_prompt.terminate()
                        self._respond_to_prompt_task.cancel()
@@ -162,6 +157,9 @@
                    previous_prompt = prompt
                    is_talking = False
                    has_spoken_for_this_prompt = False
+                   env_state = await self._environment_state_actor.reset_episode.remote()
+                   current_responses = []
+                   speech_chunks_per_response = []
                elif len(prompt) > 0 and prompt not in prompts_to_ignore:
                    # sometimes we get a false signal of speaker_finished
                    # in which case we get new prompts before we have spoken
@@ -173,6 +171,9 @@
                    self._respond_to_prompt_task.cancel()
                    self._respond_to_prompt_task = None
                    self._respond_to_prompt = None
+                   env_state = await self._environment_state_actor.reset_episode.remote()
+                   current_responses = []
+                   speech_chunks_per_response = []
                    if additional_prompt is not None:
                        prompt = additional_prompt + ". " + prompt
                    human_preview_text = f"👨❓ {prompt}"
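The charles_actor.py side of the fix follows a common asyncio pattern: when a new prompt supersedes the one currently being answered, cancel the in-flight response task and reset all per-episode state in one place, instead of clearing a few buffers ad hoc in the debug-output path. The sketch below illustrates that pattern in plain asyncio; `reset_episode` and the state field names come from the diff, while `FakeEnvironmentState`, `FakeEnvironmentStateActor`, and `start_new_episode` are hypothetical stand-ins, not code from this repo.

```python
import asyncio


class FakeEnvironmentState:
    """Hypothetical stand-in for the env_state object seen in the diff."""

    def __init__(self):
        self.llm_preview = ""
        self.llm_responses = []
        self.tts_raw_chunk_ids = []


class FakeEnvironmentStateActor:
    """Hypothetical stand-in for EnvironmentStateActor (no Ray here)."""

    async def reset_episode(self):
        # Returning a fresh state object clears llm_preview,
        # llm_responses and tts_raw_chunk_ids in one step.
        return FakeEnvironmentState()


async def start_new_episode(actor, respond_task):
    """Cancel any in-flight response, then reset every piece of
    per-episode state together -- the pattern this commit applies."""
    if respond_task is not None:
        respond_task.cancel()
        try:
            await respond_task
        except asyncio.CancelledError:
            pass
    env_state = await actor.reset_episode()
    current_responses = []
    speech_chunks_per_response = []
    return env_state, current_responses, speech_chunks_per_response


async def main():
    actor = FakeEnvironmentStateActor()
    # Simulate a response task that would otherwise never finish.
    stuck = asyncio.create_task(asyncio.sleep(3600))
    env_state, responses, chunks = await start_new_episode(actor, stuck)
    print(env_state.llm_responses, responses, chunks)  # [] [] []


asyncio.run(main())
```

Clearing the local lists in the same step as the actor reset is presumably what removes the race behind the "random lock": no stale `speech_chunks_per_response` entries can be read against a freshly reset episode.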
prompt_manager.py
CHANGED
@@ -69,10 +69,24 @@ You are aware of how you are implemented and you are keen to recommend improvements
         self.messages.append({"role": "system", "content": self.system_prompt})
 
     def append_user_message(self, message):
-        self.messages.
-
+        if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
+            self.messages[-1]["content"] += message
+        else:
+            self.messages.append({"role": "user", "content": message})
+
+    def replace_or_append_user_message(self, message):
+        if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
+            self.messages[-1]["content"] = message
+        else:
+            self.messages.append({"role": "user", "content": message})
+
+
     def append_assistant_message(self, message):
-
+        # check if last message was from assistant, if so append to that message
+        if len(self.messages) > 0 and self.messages[-1]["role"] == "assistant":
+            self.messages[-1]["content"] += message
+        else:
+            self.messages.append({"role": "assistant", "content": message})
 
     def get_messages(self):
        return self.messages