# Document AI Assistant: Gradio chat backed by an OpenAI Assistant, plus
# realtime voice transcription over the OpenAI Realtime API.
import gradio as gr
import os, time, re, json, base64, asyncio, threading, uuid, io
import numpy as np
import soundfile as sf
from pydub import AudioSegment
from openai import OpenAI
from websockets import connect
from dotenv import load_dotenv

# Load secrets
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ASSISTANT_ID = os.getenv("ASSISTANT_ID")
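# Both are read from a .env file next to this script, e.g.:
#   OPENAI_API_KEY=sk-...
#   ASSISTANT_ID=asst_...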
client = OpenAI(api_key=OPENAI_API_KEY)

HEADERS = {"Authorization": f"Bearer {OPENAI_API_KEY}", "OpenAI-Beta": "realtime=v1"}
WS_URI = "wss://api.openai.com/v1/realtime?intent=transcription"
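# Live transcription clients keyed by a per-session UUID, so multiple
# browser sessions can stream audio independently.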
connections = {}

# WebSocket client for the Realtime transcription endpoint
class WebSocketClient:
    def __init__(self, uri, headers, client_id):
        self.uri = uri
        self.headers = headers
        self.client_id = client_id
        self.websocket = None
        self.queue = asyncio.Queue(maxsize=10)  # bounded: drop chunks rather than back up when sending lags
        self.transcript = ""
        self.loop = asyncio.new_event_loop()  # dedicated loop, driven from a background thread (see run())

    async def connect(self):
        try:
            self.websocket = await connect(self.uri, additional_headers=self.headers)
            # First frame: the transcription session settings (assumed shape sketched below)
            with open("openai_transcription_settings.json", "r") as f:
                await self.websocket.send(f.read())
            await asyncio.gather(self.receive_messages(), self.send_audio_chunks())
        except Exception as e:
            print(f"🔴 WebSocket Connection Failed: {e}")
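    # openai_transcription_settings.json is not included in this file. A
    # plausible shape, assuming it mirrors the Realtime transcription session
    # events (an assumption; verify against the current Realtime API docs):
    # {
    #   "type": "transcription_session.update",
    #   "session": {
    #     "input_audio_format": "pcm16",
    #     "input_audio_transcription": {"model": "gpt-4o-transcribe"},
    #     "turn_detection": {"type": "server_vad"}
    #   }
    # }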

    def run(self):
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self.connect())

    def enqueue_audio_chunk(self, sr, arr):
        # Called from Gradio's request thread; hand the chunk to this client's loop safely
        if not self.queue.full():
            asyncio.run_coroutine_threadsafe(self.queue.put((sr, arr)), self.loop)

    async def send_audio_chunks(self):
        while True:
            sr, arr = await self.queue.get()
            # Downmix to mono and normalize to [-1, 1] before converting to 16-bit PCM
            if arr.ndim > 1:
                arr = arr.mean(axis=1)
            if np.max(np.abs(arr)) > 0:
                arr = arr / np.max(np.abs(arr))
            int16 = (arr * 32767).astype(np.int16)
            buf = io.BytesIO()
            sf.write(buf, int16, sr, format='WAV', subtype='PCM_16')
            # Resample to 24 kHz, the rate the Realtime API expects for pcm16 audio
            audio = AudioSegment.from_file(buf, format="wav").set_frame_rate(24000)
            out = io.BytesIO()
            audio.export(out, format="wav")
            out.seek(0)
            await self.websocket.send(json.dumps({
                "type": "input_audio_buffer.append",
                "audio": base64.b64encode(out.read()).decode()
            }))

    async def receive_messages(self):
        async for msg in self.websocket:
            data = json.loads(msg)
            # Accumulate streaming transcription deltas; other event types are ignored
            if data.get("type") == "conversation.item.input_audio_transcription.delta":
                self.transcript += data["delta"]

# Real-time transcription connection manager
def create_ws():
    cid = str(uuid.uuid4())
    client = WebSocketClient(WS_URI, HEADERS, cid)
    threading.Thread(target=client.run, daemon=True).start()
    connections[cid] = client
    return cid

def send_audio(chunk, cid):
    # Gradio streams audio as (sample_rate, numpy_array) tuples
    if not cid or cid not in connections:
        return "Connecting..."
    sr, arr = chunk
    connections[cid].enqueue_audio_chunk(sr, arr)
    return connections[cid].transcript

def clear_transcript(cid):
    if cid in connections:
        connections[cid].transcript = ""
    return ""

# ============ Chat Assistant ============
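# Flow: ensure a thread exists, append the user message, run the assistant,
# poll until the run settles, then read the newest assistant reply and pull
# out any document image URL it cites.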
def handle_chat(user_input, history, thread_id, image_url):
    if not OPENAI_API_KEY or not ASSISTANT_ID:
        return "โŒ Missing secrets!", history, thread_id, image_url

    try:
        if thread_id is None:
            thread = client.beta.threads.create()
            thread_id = thread.id

        client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=ASSISTANT_ID)

        # Poll until the run reaches a terminal state, so a failed run cannot loop forever
        while True:
            status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
            if status.status in ("completed", "failed", "cancelled", "expired"):
                break
            time.sleep(1)

        # messages.list returns newest-first, so the first assistant message is the latest reply
        msgs = client.beta.threads.messages.list(thread_id=thread_id)
        for msg in msgs.data:
            if msg.role == "assistant":
                content = msg.content[0].text.value
                history.append({"role": "user", "content": user_input})
                history.append({"role": "assistant", "content": content})
                match = re.search(
                    r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
                    content
                )
                if match:
                    image_url = match.group(0)
                break

        return "", history, thread_id, image_url

    except Exception as e:
        return f"โŒ {e}", history, thread_id, image_url

# ============ Gradio UI ============
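# Layout: document image on the left; chat, input row, and a collapsible
# voice-transcription panel on the right.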
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# 📄 Document AI Assistant")

    gr.HTML("""
    <style>
    #ask-btn, #clear-chat-btn {
        font-size: 16px !important;
        padding: 10px 20px !important;
    }
    .record-button button {
        font-size: 16px !important;
        padding: 12px 24px !important;
        background-color: #f2f2f2;
    }
    </style>
    """)

    chat_state = gr.State([])
    thread_state = gr.State()
    image_state = gr.State()
    client_id = gr.State()
    voice_enabled = gr.State(False)

    with gr.Row(equal_height=True):
        with gr.Column(scale=1):
            image_display = gr.Image(label="🖼️ Document", type="filepath", show_download_button=False)

        with gr.Column(scale=2):
            chat = gr.Chatbot(label="💬 Chat", height=460, type="messages")

            with gr.Row():
                user_prompt = gr.Textbox(placeholder="Ask your question...", show_label=False, scale=6)
                mic_toggle_btn = gr.Button("🎙️", scale=1)
                send_btn = gr.Button("Send", variant="primary", scale=2)

            with gr.Accordion("🎤 Voice Transcription", open=False) as voice_section:
                with gr.Row():
                    voice_input = gr.Audio(label="🎙️ Record", streaming=True, elem_classes="record-button")
                    voice_transcript = gr.Textbox(label="Transcript", lines=2, interactive=False)
                with gr.Row():
                    ask_btn = gr.Button("🟢 Ask", elem_id="ask-btn")
                    clear_chat_btn = gr.Button("🧹 Clear Chat", elem_id="clear-chat-btn")

    # Functional bindings
    def toggle_voice(curr):
        # Flip the mic flag and show or hide the transcription accordion
        return not curr, gr.update(visible=not curr)

    def send_transcript_to_assistant(transcript, history, thread_id, image_url):
        if not transcript.strip():
            return gr.update(), history, thread_id, image_url
        return handle_chat(transcript, history, thread_id, image_url)

    def clear_chat_and_transcript(client_id):
        if client_id in connections:
            connections[client_id].transcript = ""
        return [], "", None, None

    mic_toggle_btn.click(fn=toggle_voice, inputs=voice_enabled, outputs=[voice_enabled, voice_section])
    send_btn.click(fn=handle_chat,
                   inputs=[user_prompt, chat_state, thread_state, image_state],
                   outputs=[user_prompt, chat, thread_state, image_state])
    image_state.change(fn=lambda x: x, inputs=image_state, outputs=image_display)
    voice_input.stream(fn=send_audio, inputs=[voice_input, client_id], outputs=voice_transcript, stream_every=0.5)
    ask_btn.click(fn=send_transcript_to_assistant,
                  inputs=[voice_transcript, chat_state, thread_state, image_state],
                  outputs=[user_prompt, chat, thread_state, image_state])
    clear_chat_btn.click(fn=clear_chat_and_transcript,
                         inputs=[client_id],
                         outputs=[chat, voice_transcript, thread_state, image_state])
    app.load(fn=create_ws, outputs=[client_id])  # open one realtime connection per browser session

app.launch()