Commit da72dc0 · Parent: eee97a9

Fixed test

Files changed:
- tests/candidate.py (+3, -2)
- ui/coding.py (+25, -12)
tests/candidate.py CHANGED

@@ -12,6 +12,7 @@ from utils.config import Config
 from resources.data import fixed_messages, topic_lists
 from resources.prompts import prompts
 from tests.testing_prompts import candidate_prompt
+from ui.coding import send_request


 def complete_interview(
@@ -118,8 +119,8 @@ def complete_interview(
     chat_display.append([candidate_message, None])

     send_time = time.time()
-    for messages_interviewer, chat_display, previous_code in
-        candidate_message, previous_code, messages_interviewer, chat_display
+    for messages_interviewer, chat_display, previous_code, _ in send_request(
+        candidate_message, previous_code, messages_interviewer, chat_display, llm, tts=None, silent=True
     ):
         pass

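With this change the test drives the production send_request generator end to end, exhausting it with a bare `for ...: pass` loop purely for its side effects on the chat state. Below is a minimal, self-contained sketch of that idiom; fake_send_request is a toy stand-in written for illustration, not the project's actual function:

from typing import Generator, List, Optional, Tuple

def fake_send_request(
    chat_display: List[List[Optional[str]]],
    silent: bool = True,
) -> Generator[Tuple[List[List[Optional[str]]], bytes], None, None]:
    # Toy stand-in: stream text chunks into chat_display, yielding an
    # empty audio payload whenever silent mode is on.
    chat_display.append([None, ""])
    for chunk in ("Hello", ", ", "world"):
        chat_display[-1][1] += chunk
        yield chat_display, b"" if silent else b"\x00"

chat_display: List[List[Optional[str]]] = [["Hi", None]]

# The test only cares about the side effects on the chat state, so the
# loop body is just `pass`, exactly as in the updated tests/candidate.py.
for chat_display, _ in fake_send_request(chat_display, silent=True):
    pass

assert chat_display[-1][1] == "Hello, world"

Passing tts=None together with silent=True is safe here because the silent branch of send_request yields b"" without ever touching the TTS object.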
ui/coding.py CHANGED

@@ -11,12 +11,21 @@ from functools import partial


 def send_request(
-    code: str,
+    code: str,
+    previous_code: str,
+    chat_history: List[Dict[str, str]],
+    chat_display: List[List[Optional[str]]],
+    llm,
+    tts,
+    silent: Optional[bool] = False,
 ) -> Generator[Tuple[List[Dict[str, str]], List[List[Optional[str]]], str, bytes], None, None]:
     """
     Send a request to the LLM and update the chat display and translate it to speech.
     """
     # TODO: Find the way to simplify it and remove duplication in logic
+    if silent is None:
+        silent = os.getenv("SILENT", False)
+
     chat_history = llm.update_chat_history(code, previous_code, chat_history, chat_display)
     original_len = len(chat_display)
     chat_display.append([None, ""])
@@ -27,11 +36,11 @@ def send_request(

     audio_generator = iter(())
     has_text_item = True
-    has_audion_item = True
+    has_audio_item = not silent
     audio_created = 0
     is_notes = False

-    while has_text_item or has_audion_item:
+    while has_text_item or has_audio_item:
         try:
             text_chunk = next(reply)
             text_chunks.append(text_chunk)
@@ -40,12 +49,15 @@ def send_request(
             has_text_item = False
             chat_history[-1]["content"] = "".join(text_chunks)

-        try:
-            audio_chunk = next(audio_generator)
-            has_audion_item = True
-        except StopIteration:
+        if silent:
             audio_chunk = b""
-            has_audion_item = False
+        else:
+            try:
+                audio_chunk = next(audio_generator)
+                has_audio_item = True
+            except StopIteration:
+                audio_chunk = b""
+                has_audio_item = False

         if has_text_item and not is_notes:
             last_message = chat_display[-1][1]
@@ -60,10 +72,11 @@ def send_request(
             for m in split_messages[1:]:
                 chat_display.append([None, m])

-        if len(chat_display) - original_len > audio_created + has_text_item:
-            audio_generator = chain(audio_generator, tts.read_text(chat_display[original_len + audio_created][1]))
-            audio_created += 1
-            has_audion_item = True
+        if not silent:
+            if len(chat_display) - original_len > audio_created + has_text_item:
+                audio_generator = chain(audio_generator, tts.read_text(chat_display[original_len + audio_created][1]))
+                audio_created += 1
+                has_audio_item = True

         yield chat_history, chat_display, code, audio_chunk

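The reworked loop interleaves two streams, text chunks from the LLM reply and audio chunks from a lazily extended TTS generator, and the new silent flag short-circuits the audio side entirely. Here is a condensed, self-contained sketch of that control flow; the names (stream_with_optional_tts, read_text, beeps) are illustrative, and the "queue TTS once the text is complete" trigger simplifies the original's per-message audio_created bookkeeping:

from itertools import chain
from typing import Callable, Generator, Iterator, Tuple

def stream_with_optional_tts(
    text_chunks: Iterator[str],
    read_text: Callable[[str], Iterator[bytes]],  # stands in for tts.read_text
    silent: bool = False,
) -> Generator[Tuple[str, bytes], None, None]:
    # Mirror of send_request's loop shape: keep yielding while either the
    # text stream or the audio stream still has items pending.
    audio_generator: Iterator[bytes] = iter(())
    has_text_item = True
    has_audio_item = not silent  # in silent mode the audio side never starts
    text = ""
    audio_queued = False

    while has_text_item or has_audio_item:
        try:
            text += next(text_chunks)
        except StopIteration:
            has_text_item = False

        if silent:
            audio_chunk = b""
        else:
            try:
                audio_chunk = next(audio_generator)
                has_audio_item = True
            except StopIteration:
                audio_chunk = b""
                has_audio_item = False

        # Simplification: once the full text has arrived, queue it for TTS
        # exactly once (the real code queues per finished display message).
        if not silent and not has_text_item and not audio_queued:
            audio_generator = chain(audio_generator, read_text(text))
            audio_queued = True
            has_audio_item = True

        yield text, audio_chunk

def beeps(text: str) -> Iterator[bytes]:
    # Hypothetical TTS: one fake audio frame per word.
    return (b"\x01" for _ in text.split())

for text, audio in stream_with_optional_tts(iter(["How ", "are ", "you?"]), beeps):
    print(repr(text), len(audio))

Seeding has_audio_item = not silent keeps the while loop alive until the audio stream has actually drained, while in silent mode the loop ends as soon as the text stream does.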