import json
import os
from dataclasses import dataclass
from typing import Dict, List
import gradio as gr
import requests
from bs4 import BeautifulSoup
from openai import OpenAI
@dataclass
class TranscriptSegment:
speaker_id: str
start_time: float
end_time: float
text: str
speaker_name: str = ""
class TranscriptProcessor:
def __init__(
self,
transcript_file: str = None,
transcript_data: dict = None,
max_segment_duration: int = None,
call_type: str = "le",
):
self.transcript_file = transcript_file
self.transcript_data = transcript_data
self.formatted_transcript = None
self.segments = []
self.speaker_mapping = {}
self.max_segment_duration = max_segment_duration
if self.transcript_file:
self._load_transcript()
elif self.transcript_data:
pass # transcript_data is already set
else:
raise ValueError(
"Either transcript_file or transcript_data must be provided."
)
self._process_transcript()
self._create_formatted_transcript() # Create initial formatted transcript
if call_type != "si":
self.map_speaker_ids_to_names()
def _load_transcript(self) -> None:
"""Load the transcript JSON file."""
with open(self.transcript_file, "r") as f:
self.transcript_data = json.load(f)
def _format_time(self, seconds: float) -> str:
"""Convert seconds to formatted time string (MM:SS)."""
minutes = int(seconds // 60)
seconds = int(seconds % 60)
return f"{minutes:02d}:{seconds:02d}"
def _process_transcript(self) -> None:
results = self.transcript_data["results"]
for segment in results["speaker_labels"]["segments"]:
speaker_id = segment.get("speaker_label", segment.get("speakerlabel", ""))
speaker_id = (
speaker_id.replace("spk_", "").replace("spk", "") if speaker_id else ""
)
start_time = float(segment.get("start_time", 0))
end_time = float(segment.get("end_time", 0))
if self.max_segment_duration:
current_start = start_time
while current_start < end_time:
current_end = min(
current_start + self.max_segment_duration, end_time
)
self._create_segment(
speaker_id, current_start, current_end, results["items"]
)
current_start = current_end
else:
self._create_segment(speaker_id, start_time, end_time, results["items"])
def _create_segment(
self, speaker_id: str, start: float, end: float, items: list
) -> None:
matching_items = [
item
for item in items
if "start_time" in item
and float(item["start_time"]) >= start
and float(item["start_time"]) < end
and item["type"] == "pronunciation"
]
words = [item["alternatives"][0]["content"] for item in matching_items]
if words:
self.segments.append(
TranscriptSegment(
speaker_id=speaker_id,
start_time=start,
end_time=end,
text=" ".join(words),
)
)
def _create_formatted_transcript(self) -> None:
"""Create formatted transcript with default speaker labels."""
formatted_segments = []
for seg in self.segments:
start_time_str = self._format_time(seg.start_time)
end_time_str = self._format_time(seg.end_time)
# Use default speaker label (spk_X) if no mapping exists
speaker_label = f"spk_{seg.speaker_id}"
formatted_segments.append(
f"time_stamp: {start_time_str}-{end_time_str}\n"
f"{speaker_label}: {seg.text}\n"
)
self.formatted_transcript = "\n".join(formatted_segments)
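    # Illustrative output of _create_formatted_transcript (values made up):
    #
    #   time_stamp: 00:05-00:12
    #   spk_0: hello everyone my name is alex
    #
    #   time_stamp: 00:12-00:17
    #   spk_1: thanks for having me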
def map_speaker_ids_to_names(self) -> None:
"""Map speaker IDs to names based on introductions in the transcript."""
try:
transcript = self.formatted_transcript
prompt = (
"Given the following transcript where speakers are identified as spk 0, spk 1, spk 2, etc., please map each spk ID to the speaker's name based on their introduction in the transcript. If no name is introduced for a speaker, keep it as spk_id. Return the mapping as a JSON object in the format {'spk_0': 'Speaker Name', 'spk_1': 'Speaker Name', ...}\n\n"
f"Transcript:\n{transcript}"
)
client = OpenAI()
completion = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
temperature=0,
)
response_text = completion.choices[0].message.content.strip()
try:
self.speaker_mapping = json.loads(response_text)
except json.JSONDecodeError:
response_text = response_text[
response_text.find("{") : response_text.rfind("}") + 1
]
try:
self.speaker_mapping = json.loads(response_text)
except json.JSONDecodeError:
print("Error parsing speaker mapping JSON.")
self.speaker_mapping = {}
# Update segments with speaker names and recreate formatted transcript
for segment in self.segments:
spk_id = f"spk_{segment.speaker_id}"
speaker_name = self.speaker_mapping.get(spk_id, spk_id)
segment.speaker_name = speaker_name
self._create_formatted_transcript_with_names()
except Exception as e:
print(f"Error mapping speaker IDs to names: {str(e)}")
self.speaker_mapping = {}
def _create_formatted_transcript_with_names(self) -> None:
"""Create formatted transcript with mapped speaker names."""
formatted_segments = []
for seg in self.segments:
start_time_str = self._format_time(seg.start_time)
end_time_str = self._format_time(seg.end_time)
speaker_name = getattr(seg, "speaker_name", f"spk_{seg.speaker_id}")
formatted_segments.append(
f"Start Time: {start_time_str} - End Time: {end_time_str}\n"
f"{speaker_name}: {seg.text}\n"
)
self.formatted_transcript = "\n".join(formatted_segments)
def get_transcript(self) -> str:
"""Return the formatted transcript with speaker names."""
return self.formatted_transcript
def get_transcript_data(self) -> Dict:
"""Return the raw transcript data."""
return self.transcript_data
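# A minimal usage sketch for TranscriptProcessor, assuming a local AWS
# Transcribe-style JSON file; "transcript.json" and the argument values below
# are illustrative, not part of this module:
#
#   processor = TranscriptProcessor(
#       transcript_file="transcript.json",
#       max_segment_duration=5,
#       call_type="le",
#   )
#   print(processor.get_transcript())
#   print(processor.speaker_mapping)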
def setup_openai_key() -> None:
"""Set up OpenAI API key from file."""
try:
with open("api.key", "r") as f:
os.environ["OPENAI_API_KEY"] = f.read().strip()
except FileNotFoundError:
print("Using ENV variable")
# raise FileNotFoundError(
# "api.key file not found. Please create it with your OpenAI API key."
# )
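# Usage note: create a local "api.key" file containing only the key, or export
# OPENAI_API_KEY before launching; the except branch above assumes the
# environment variable is already set.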
def get_transcript_for_url(url: str) -> dict:
"""
This function fetches the transcript data for a signed URL.
If the URL results in a direct download, it processes the downloaded content.
:param url: Signed URL for the JSON file
:return: Parsed JSON data as a dictionary
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
if "application/json" in response.headers.get("Content-Type", ""):
return response.json() # Parse and return JSON directly
        # If not JSON (e.g., served as an attachment via Content-Disposition),
        # parse the raw body as JSON.
        return json.loads(response.content)
except requests.exceptions.HTTPError as http_err:
print(f"HTTP error occurred: {http_err}")
except requests.exceptions.RequestException as req_err:
print(f"Request error occurred: {req_err}")
except json.JSONDecodeError as json_err:
print(f"JSON decoding error: {json_err}")
return {}
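# Hedged usage sketch for get_transcript_for_url; the signed URL below is a
# placeholder, not a real endpoint:
#
#   data = get_transcript_for_url("https://example.com/transcript.json?sig=...")
#   if data:
#       processor = TranscriptProcessor(transcript_data=data, call_type="le")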
def get_initial_analysis(
transcript_processor: TranscriptProcessor, cid, rsid, origin, ct, uid
) -> str:
"""Perform initial analysis of the transcript using OpenAI."""
try:
transcript = transcript_processor.get_transcript()
speaker_mapping = transcript_processor.speaker_mapping
client = OpenAI()
if "localhost" in origin:
link_start = "http"
else:
link_start = "https"
if ct == "si": # street interview
prompt = f"""This is a transcript for a street interview. Call Details are as follows:
User ID (UID): {uid}
Transcript: {transcript}
Your task is to analyze this street interview transcript and identify the final/best timestamps for each topic or question discussed. Here are the key rules:
The user might repeat the answer to a question several times; you need to intelligently pick the very last answer.
1. For any topic/answer that appears multiple times in the transcript (even partially):
- The LAST occurrence is always considered the best version. If the same thing is said multiple times, the last one is the best; all earlier ones count as additional takes.
- This includes cases where parts of an answer are scattered throughout the transcript
- Even slight variations of the same answer should be tracked
- List timestamps for ALL takes, with the final take highlighted as the best answer
2. Introduction handling:
- Question 1 is ALWAYS the speaker's introduction/self-introduction
- If someone introduces themselves multiple times, use the last introduction as best answer
- Include all variations of how they state their name/background
- List ALL introduction timestamps chronologically
3. Question sequence:
- After the introduction, list questions in the order they were first asked
- If a question or introduction is revisited later at any point, please use the later timestamp
- Track partial answers to the same question across the transcript
Make sure that whenever any wording is repeated, you pick the last occurrence of it.
Return format:
[Question Title]
Total takes: [X] (Include ONLY if content appears more than once)
- [Take 1. <div id='topic' style="display: inline"> 15s at 12:30 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{750}}&et={{765}}&uid={{uid}})
- [Take 2. <div id='topic' style="display: inline"> 30s at 14:45 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{885}}&et={{915}}&uid={{uid}})
...
- [Take X (Best). <div id='topic' style="display: inline"> 1m 10s at 16:20 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{980}}&et={{1050}}&uid={{uid}})
URL formatting:
- Convert timestamps to seconds (e.g., 10:13 → 613)
- Format: {link_start}://[origin]/colab/[cid]/[rsid]?st=[start_seconds]&et=[end_seconds]&uid=[unique_id]
- Parameters after RSID must start with ? and subsequent parameters use &
Example:
1. Introduction
Total takes: 3
- [Take 1. <div id='topic' style="display: inline"> 10s at 09:45 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{585}}&et={{595}}&uid={{uid}})
- [Take 2. <div id='topic' style="display: inline"> 20s at 25:45 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{1245}}&et={{1265}}&uid={{uid}})
- [Take 3 (Best). <div id='topic' style="display: inline"> 5s at 10:13 </div>]({link_start}://roll.ai/colab/1234aq_12314/51234151?st=613&et=618&uid=82314)"""
completion = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "system",
"content": f"""You are analyzing a transcript for Call ID: {cid}, Session ID: {rsid}, Origin: {origin}, Call Type: {ct}.
CORE REQUIREMENT:
- TIMESTAMPS: A speaker can repeat the answer to a question multiple times. You need to pick the last answer very carefully and choose it as the best take. Make sure the same answer is not repeated again after the best answer.
YOU SHOULD Prioritize accuracy in timestamp at every cost. Read the Transcript carefully and decide where an answer starts and ends. You will have speaker labels so you need to be very sharp.""",
},
{"role": "user", "content": prompt},
],
stream=True,
temperature=0.1,
)
else:
system_prompt = f"""You are analyzing a transcript for Call ID: {cid}, Session ID: {rsid}, Origin: {origin}, and Call Type: {ct}.
CORE REQUIREMENTS:
1. TIMESTAMPS: Each clip must contain ONLY the specified speaker's dialogue about a single topic. No overlapping dialogue from other speakers. YOU NEED TO BE VERY CAREFUL ABOUT THIS RULE. YOU HAVE THE TRANSCRIPT AND YOU CAN SEE WHO IS SPEAKING AT WHAT TIME, SO BE VERY, VERY CAREFUL AND ONLY INCLUDE THE DIALOGUE OF THE SPEAKER YOU ARE MAKING THE CLIP FOR.
2. DURATION: Clips should be between 20-90 seconds long.
3. CONTENT: Select engaging, viral-worthy topics. Avoid mundane or irrelevant content.
4. COVERAGE: Minimum 2 topics per speaker, aim for 3 if good content exists.
5. YOU CAN IGNORE THE HOST IF NO COMPELLING CONTENT IS FOUND.
YOU SHOULD prioritize accuracy in timestamps at all costs.
"""
user_prompt = f"""Call Details:
User ID: {uid}
Call ID: {cid}
Speakers: {", ".join(speaker_mapping.values())}
Transcript: {transcript}
Your task is to analyze speakers' discussions to identify compelling social media clips. For each speaker, identify key topics that mention people, news, events, trends, or sources.
Format requirements:
1. SPEAKER FORMAT:
**Speaker Name**
1. [Topic title <div id='topic' style="display: inline"> 22s at 12:30 </div>]({{link_start}}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{750}}&et={{772}}&uid={{uid}})
2. [Topic title <div id='topic' style="display: inline"> 43s at 14:45 </div>]({{link_start}}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{885}}&et={{928}}&uid={{uid}})
3. [Topic title <div id='topic' style="display: inline"> 58s at 16:20 </div>]({{link_start}}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{980}}&et={{1038}}&uid={{uid}})
2. TIMESTAMP RULES:
- Start time (st): Must begin exactly when speaker starts discussing the specific topic.
- End time (et): Must end exactly when either:
* The speaker completes their point, or
* Before the next speaker begins.
- NO OVERLAP: Selected duration must NEVER include dialogue from other speakers.
- Duration limits: Minimum 20 seconds, maximum 1 minute 30 seconds.
- Time format: "Xs at HH:MM" where X = seconds.
- URL parameters: Convert display times to seconds.
Example: "25s at 10:13" β st=613&et=638.
3. FORMATTING RULES:
- Speaker names: Use markdown bold (**Name**).
- Topic titles: First word capitalized, rest lowercase.
- Each topic must be a clickable link with correct timestamp.
- URL format: {{link_start}}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{start_time_in_sec}}&et={{end_time_in_sec}}&uid={{uid}}.
4. TOPIC SELECTION:
- Prioritize engaging, viral-worthy content.
- Minimum 2 topics per speaker, aim for 3 if available (SKIP THE HOST if no compelling content).
- Topics should be self-contained discussions within the timestamp.
- Skip speakers if fewer than 2 compelling topics found.
"""
print(user_prompt)
completion = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
stream=True,
temperature=0.1,
)
collected_messages = []
# Iterate through the stream
for chunk in completion:
if chunk.choices[0].delta.content is not None:
chunk_message = chunk.choices[0].delta.content
collected_messages.append(chunk_message)
# Yield the accumulated message so far
yield "".join(collected_messages)
except Exception as e:
print(f"Error in initial analysis: {str(e)}")
yield "An error occurred during initial analysis. Please check your API key and file path."
def chat(
message: str,
chat_history: List,
transcript_processor: TranscriptProcessor,
cid,
rsid,
origin,
ct,
uid,
) -> str:
tools = [
{
"type": "function",
"function": {
"name": "correct_speaker_name_with_url",
"description": "If a User provides a link to Agenda file, call the correct_speaker_name_with_url function to correct the speaker names based on the url, i.e if a user says 'Here is the Luma link for the event' and provides a link to the event, the function will correct the speaker names based on the event.",
"parameters": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "The url to the agenda.",
},
},
"required": ["url"],
"additionalProperties": False,
},
},
},
{
"type": "function",
"function": {
"name": "correct_call_type",
"description": "If the user tells you the correct call type, you have to apologize and call this function with correct call type.",
"parameters": {
"type": "object",
"properties": {
"call_type": {
"type": "string",
"description": "The correct call type. If street interview, call type is 'si'.",
},
},
"required": ["call_type"],
"additionalProperties": False,
},
},
},
]
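    # Note: tool-call arguments arrive as JSON strings (e.g. a
    # correct_speaker_name_with_url call carries something like
    # {"url": "https://lu.ma/some-event"}; the URL is an illustrative
    # placeholder) and are parsed with json.loads below.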
try:
client = OpenAI()
if "localhost" in origin:
link_start = "http"
else:
link_start = "https"
speaker_mapping = transcript_processor.speaker_mapping
prompt = f"""You are a helpful assistant analyzing transcripts and generating timestamps and URL. The user will ask you questions regarding the social media clips from the transcript.
Call ID is {cid},
Session ID is {rsid},
origin is {origin},
Call Type is {ct}.
Speakers: {", ".join(speaker_mapping.values())}
Transcript: {transcript_processor.get_transcript()}
If a user asks for timestamps for a specific topic, find the start time and end time of that topic and return the answer in the format below.
Answers and URLs should be formatted as follows:
[Topic title <div id='topic' style="display: inline"> 22s at 12:30 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{750}}&et={{772}}&uid={{uid}})
For Example:
If the start time is 10:13 and end time is 10:18, the url will be:
{link_start}://roll.ai/colab/1234aq_12314/51234151?st=613&et=618&uid=82314
In the URL, make sure that after RSID there is ? and then rest of the fields are added via &.
You can include multiple links here that relate to the user's answer. ALWAYS ANSWER FROM THE TRANSCRIPT.
RULE: When selecting timestamps for the answer, always use the **starting time (XX:YY)** as the reference point for your response, with the duration (Z seconds) calculated from this starting time, not the ending time of the segment.
Example 1:
User: Suggest me some clips that can go viral on Instagram.
Response:
1. [Clip 1 <div id='topic' style="display: inline"> 22s at 12:30 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{750}}&et={{772}}&uid={{uid}})
2. [Clip 2 <div id='topic' style="display: inline"> 10s at 10:00 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{600}}&et={{610}}&uid={{uid}})
Example 2:
User: Give me the URL where each person has introduced themselves. Provide the exact timestamp where the person begins their introduction, typically starting with phrases like "Hi," "Hello," "I am," or "My name is," and include the full introduction, covering everything they say about themselves, including their name, role, background, current responsibilities, organization, and any additional details they provide about their work or personal interests.
Response:
1. [Person Name1 <div id='topic' style="display: inline"> 43s at 14:45 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{885}}&et={{928}}&uid={{uid}})
2. [Person Name2 <div id='topic' style="display: inline"> 58s at 16:20 </div>]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{980}}&et={{1038}}&uid={{uid}})
....
If the user provides a link to the agenda, use the correct_speaker_name_with_url function to correct the speaker names based on the agenda.
If the user provides the correct call type, use the correct_call_type function to correct the call type. Call Type for street interviews is 'si'.
"""
messages = [{"role": "system", "content": prompt}]
print(messages[0]["content"])
for user_msg, assistant_msg in chat_history:
if user_msg is not None: # Skip the initial message where user_msg is None
messages.append({"role": "user", "content": user_msg})
if assistant_msg is not None:
messages.append({"role": "assistant", "content": assistant_msg})
# Add the current message
messages.append({"role": "user", "content": message})
completion = client.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
tools=tools,
stream=True,
temperature=0.3,
)
collected_messages = []
tool_calls_detected = False
for chunk in completion:
if chunk.choices[0].delta.tool_calls:
tool_calls_detected = True
# Handle tool calls without streaming
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
tools=tools,
)
if response.choices[0].message.tool_calls:
tool_call = response.choices[0].message.tool_calls[0]
if tool_call.function.name == "correct_speaker_name_with_url":
                        # Parse the JSON arguments safely instead of eval
                        args = json.loads(tool_call.function.arguments)
url = args.get("url", None)
if url:
transcript_processor.correct_speaker_mapping_with_agenda(
url
)
                            corrected_speaker_mapping = (
                                transcript_processor.speaker_mapping
                            )
                            function_call_result_message = {
                                "role": "tool",
                                "content": json.dumps(
                                    {"speaker_mapping": corrected_speaker_mapping}
                                ),
                                "name": tool_call.function.name,
                                "tool_call_id": tool_call.id,
                            }
messages.append(function_call_result_message)
# Get final response after tool call
final_response = client.chat.completions.create(
model="gpt-4o-mini", messages=messages, stream=True
)
# Stream the final response
for final_chunk in final_response:
if final_chunk.choices[0].delta.content:
yield final_chunk.choices[0].delta.content
return
else:
function_call_result_message = {
"role": "tool",
"content": "No URL Provided",
"name": tool_call.function.name,
"tool_call_id": tool_call.id,
}
elif tool_call.function.name == "correct_call_type":
                        # Parse the JSON arguments safely instead of eval
                        args = json.loads(tool_call.function.arguments)
call_type = args.get("call_type", None)
if call_type:
# Stream the analysis for corrected call type
for content in get_initial_analysis(
transcript_processor,
                                cid,
rsid,
origin,
call_type,
uid,
):
yield content
return
break # Exit streaming loop if tool calls detected
if not tool_calls_detected and chunk.choices[0].delta.content is not None:
chunk_message = chunk.choices[0].delta.content
collected_messages.append(chunk_message)
yield "".join(collected_messages)
except Exception as e:
print(f"Unexpected error in chat: {str(e)}")
import traceback
print(f"Traceback: {traceback.format_exc()}")
yield "Sorry, there was an error processing your request."
def create_chat_interface():
"""Create and configure the chat interface."""
css = """
.gradio-container {
padding-top: 0px !important;
padding-left: 0px !important;
padding-right: 0px !important;
padding: 0px !important;
margin: 0px !important;
}
#component-0 {
gap: 0px !important;
}
.icon-button-wrapper{
display: none !important;
}
footer {
display: none !important;
}
#chatbot_box{
flex-grow: 1 !important;
border-width: 0px !important;
}
#link-frame {
position: absolute !important;
width: 1px !important;
height: 1px !important;
right: -100px !important;
bottom: -100px !important;
display: none !important;
}
.html-container {
display: none !important;
}
a {
text-decoration: none !important;
}
#topic {
color: #aaa !important;
}
.bubble-wrap {
padding-top: 0px !important;
}
.message-content {
border: 0px !important;
margin: 5px !important;
}
.message-row {
border-style: none !important;
margin: 0px !important;
width: 100% !important;
max-width: 100% !important;
}
.flex-wrap {
border-style: none !important;
}
.panel-full-width {
border-style: none !important;
border-width: 0px !important;
}
ol {
list-style-position: outside;
margin-left: 20px;
}
body.waiting * {
cursor: progress;
}
"""
js = """
function createIframeHandler() {
let iframe = document.getElementById('link-frame');
if (!iframe) {
iframe = document.createElement('iframe');
iframe.id = 'link-frame';
iframe.style.position = 'absolute';
iframe.style.width = '1px';
iframe.style.height = '1px';
iframe.style.right = '-100px';
iframe.style.bottom = '-100px';
iframe.style.display = 'none'; // Hidden initially
document.body.appendChild(iframe);
}
document.addEventListener('click', function (event) {
var link = event.target.closest('a');
if (link && link.href) {
document.body.classList.add('waiting');
setTimeout(function () {
document.body.classList.remove('waiting');
}, 2000); // Reset cursor after 2 seconds
try {
iframe.src = link.href;
iframe.style.display = 'block'; // Show iframe on link click
event.preventDefault();
console.log('Opening link in iframe:', link.href);
} catch (error) {
console.error('Failed to open link in iframe:', error);
}
}
});
return 'Iframe handler initialized';
}
"""
with gr.Blocks(
fill_height=True,
fill_width=True,
css=css,
js=js,
theme=gr.themes.Default(
font=[gr.themes.GoogleFont("Inconsolata"), "Arial", "sans-serif"]
),
) as demo:
chatbot = gr.Chatbot(
elem_id="chatbot_box",
layout="bubble",
show_label=False,
show_share_button=False,
show_copy_all_button=False,
show_copy_button=False,
)
msg = gr.Textbox(elem_id="chatbot_textbox", show_label=False)
        transcript_processor_state = gr.State()  # holds the TranscriptProcessor and related context between events
call_id_state = gr.State()
colab_id_state = gr.State()
origin_state = gr.State()
ct_state = gr.State()
turl_state = gr.State()
uid_state = gr.State()
iframe_html = "<iframe id='link-frame'></iframe>"
gr.HTML(value=iframe_html) # Add iframe to the UI
def respond(
message: str,
chat_history: List,
transcript_processor,
cid,
rsid,
origin,
ct,
uid,
):
            if not transcript_processor:
                bot_message = "Transcript processor not initialized."
                chat_history.append((message, bot_message))
                # respond is a generator, so the error must be yielded,
                # not returned, for it to reach the UI
                yield "", chat_history
                return
chat_history.append((message, ""))
for chunk in chat(
message,
chat_history[:-1], # Exclude the current incomplete message
transcript_processor,
cid,
rsid,
origin,
ct,
uid,
):
chat_history[-1] = (message, chunk)
yield "", chat_history
msg.submit(
respond,
[
msg,
chatbot,
transcript_processor_state,
call_id_state,
colab_id_state,
origin_state,
ct_state,
uid_state,
],
[msg, chatbot],
)
# Handle initial loading with streaming
def on_app_load(request: gr.Request):
cid = request.query_params.get("cid", None)
rsid = request.query_params.get("rsid", None)
origin = request.query_params.get("origin", None)
ct = request.query_params.get("ct", None)
turl = request.query_params.get("turl", None)
uid = request.query_params.get("uid", None)
required_params = ["cid", "rsid", "origin", "ct", "turl", "uid"]
missing_params = [
param
for param in required_params
if request.query_params.get(param) is None
]
if missing_params:
error_message = (
f"Missing required parameters: {', '.join(missing_params)}"
)
chatbot_value = [(None, error_message)]
return [chatbot_value, None, None, None, None, None, None, None]
try:
transcript_data = get_transcript_for_url(turl)
transcript_processor = TranscriptProcessor(
transcript_data=transcript_data,
max_segment_duration=5 if ct != "si" else 10,
call_type=ct,
)
# Initialize with empty message
chatbot_value = [(None, "")]
# Return initial values with the transcript processor
return [
chatbot_value,
transcript_processor,
cid,
rsid,
origin,
ct,
turl,
uid,
]
except Exception as e:
error_message = f"Error processing call_id {cid}: {str(e)}"
chatbot_value = [(None, error_message)]
return [chatbot_value, None, None, None, None, None, None, None]
def stream_initial_analysis(
chatbot_value, transcript_processor, cid, rsid, origin, ct, uid
):
if transcript_processor:
for chunk in get_initial_analysis(
transcript_processor, cid, rsid, origin, ct, uid
):
chatbot_value[0] = (None, chunk)
yield chatbot_value
else:
yield chatbot_value
# Modified load event to handle streaming
demo.load(
on_app_load,
inputs=None,
outputs=[
chatbot,
transcript_processor_state,
call_id_state,
colab_id_state,
origin_state,
ct_state,
turl_state,
uid_state,
],
).then(
stream_initial_analysis,
inputs=[
chatbot,
transcript_processor_state,
call_id_state,
colab_id_state,
origin_state,
ct_state,
uid_state,
],
outputs=[chatbot],
)
return demo
def main():
"""Main function to run the application."""
try:
setup_openai_key()
demo = create_chat_interface()
demo.launch(share=True)
except Exception as e:
print(f"Error starting application: {str(e)}")
raise
if __name__ == "__main__":
main()
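# When deployed, on_app_load reads its context from query parameters, e.g.
# (illustrative values only):
#   https://<app-host>/?cid=1234&rsid=5678&origin=roll.ai&ct=le&turl=<signed-json-url>&uid=82314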