awacke1 commited on
Commit
b75652c
·
verified ·
1 Parent(s): cfa51fe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -200
app.py CHANGED
@@ -660,12 +660,100 @@ def display_file_manager():
660
  if st.button("๐Ÿ—‘", key=f"delete_{unique_id}"):
661
  os.remove(file)
662
  st.rerun()
663
- # First, define the speech recognition HTML template at the global scope, before the main() function
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
664
  speech_recognition_html = """
665
  <!DOCTYPE html>
666
  <html>
667
  <head>
668
- <title>Voice Recognition</title>
669
  <style>
670
  body {
671
  font-family: sans-serif;
@@ -709,8 +797,6 @@ speech_recognition_html = """
709
  <div id="output"></div>
710
 
711
  <script>
712
- let messageTarget = 'textarea[aria-label="Message:"]';
713
-
714
  if (!('webkitSpeechRecognition' in window)) {
715
  alert('Speech recognition not supported');
716
  } else {
@@ -727,15 +813,6 @@ speech_recognition_html = """
727
  recognition.continuous = true;
728
  recognition.interimResults = true;
729
 
730
- const updateTextArea = (text) => {
731
- const textArea = document.querySelector(messageTarget);
732
- if (textArea) {
733
- textArea.value = text;
734
- const event = new Event('input', { bubbles: true });
735
- textArea.dispatchEvent(event);
736
- }
737
- };
738
-
739
  // Function to start recognition
740
  const startRecognition = () => {
741
  try {
@@ -766,7 +843,6 @@ speech_recognition_html = """
766
  clearButton.onclick = () => {
767
  fullTranscript = '';
768
  output.textContent = '';
769
- updateTextArea('');
770
  window.parent.postMessage({
771
  type: 'clear_transcript',
772
  }, '*');
@@ -788,11 +864,10 @@ speech_recognition_html = """
788
  if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
789
  if (finalTranscript) {
790
  fullTranscript += finalTranscript;
791
- updateTextArea(fullTranscript);
792
  window.parent.postMessage({
793
  type: 'final_transcript',
794
- text: finalTranscript,
795
- updateInput: true
796
  }, '*');
797
  }
798
  lastUpdateTime = Date.now();
@@ -831,137 +906,6 @@ speech_recognition_html = """
831
  </html>
832
  """
833
 
834
- def main():
835
- st.sidebar.markdown("### ๐ŸšฒBikeAI๐Ÿ† Claude and GPT Multi-Agent Research AI")
836
-
837
- # Initialize text input state if not exists
838
- if 'current_text_input' not in st.session_state:
839
- st.session_state.current_text_input = ""
840
-
841
- # Main navigation
842
- tab_main = st.radio("Choose Action:",
843
- ["๐ŸŽค Voice Input", "๐Ÿ’ฌ Chat", "๐Ÿ“ธ Media Gallery", "๐Ÿ” Search ArXiv", "๐Ÿ“ File Editor"],
844
- horizontal=True)
845
-
846
- if tab_main == "๐ŸŽค Voice Input":
847
- st.subheader("Voice Recognition")
848
-
849
- # Display speech recognition component
850
- speech_component = st.components.v1.html(speech_recognition_html, height=400)
851
-
852
- # Handle speech recognition output
853
- if speech_component:
854
- try:
855
- data = speech_component
856
- if isinstance(data, dict) and data.get('type') == 'final_transcript':
857
- text = data.get('text', '').strip()
858
- if text:
859
- st.session_state.last_voice_input = text
860
- st.session_state.current_text_input = text
861
-
862
- # Process voice input with AI
863
- st.subheader("AI Response to Voice Input:")
864
-
865
- col1, col2, col3 = st.columns(3)
866
- with col2:
867
- st.write("Claude-3.5 Sonnet:")
868
- try:
869
- claude_response = process_with_claude(text)
870
- except:
871
- st.write('Claude 3.5 Sonnet out of tokens.')
872
- with col1:
873
- st.write("GPT-4o Omni:")
874
- try:
875
- gpt_response = process_with_gpt(text)
876
- except:
877
- st.write('GPT 4o out of tokens')
878
- with col3:
879
- st.write("Arxiv and Mistral Research:")
880
- with st.spinner("Searching ArXiv..."):
881
- results = perform_ai_lookup(text)
882
- st.markdown(results)
883
-
884
- elif isinstance(data, dict) and data.get('type') == 'clear_transcript':
885
- st.session_state.last_voice_input = ""
886
- st.session_state.current_text_input = ""
887
- st.experimental_rerun()
888
-
889
- except Exception as e:
890
- st.error(f"Error processing voice input: {e}")
891
-
892
- elif tab_main == "๐Ÿ’ฌ Chat":
893
- # Model Selection
894
- model_choice = st.sidebar.radio(
895
- "Choose AI Model:",
896
- ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
897
- )
898
-
899
- # Use the current_text_input value in the text area
900
- user_input = st.text_area("Message:", value=st.session_state.current_text_input, height=100)
901
-
902
- if st.button("Send ๐Ÿ“จ"):
903
- if user_input:
904
- if model_choice == "GPT-4o":
905
- gpt_response = process_with_gpt(user_input)
906
- elif model_choice == "Claude-3":
907
- claude_response = process_with_claude(user_input)
908
- else: # Both
909
- col1, col2, col3 = st.columns(3)
910
- with col2:
911
- st.subheader("Claude-3.5 Sonnet:")
912
- try:
913
- claude_response = process_with_claude(user_input)
914
- except:
915
- st.write('Claude 3.5 Sonnet out of tokens.')
916
- with col1:
917
- st.subheader("GPT-4o Omni:")
918
- try:
919
- gpt_response = process_with_gpt(user_input)
920
- except:
921
- st.write('GPT 4o out of tokens')
922
- with col3:
923
- st.subheader("Arxiv and Mistral Research:")
924
- with st.spinner("Searching ArXiv..."):
925
- results = perform_ai_lookup(user_input)
926
- st.markdown(results)
927
-
928
- # Display Chat History
929
- st.subheader("Chat History ๐Ÿ“œ")
930
- tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
931
-
932
- with tab1:
933
- for chat in st.session_state.chat_history:
934
- st.text_area("You:", chat["user"], height=100)
935
- st.text_area("Claude:", chat["claude"], height=200)
936
- st.markdown(chat["claude"])
937
-
938
- with tab2:
939
- for message in st.session_state.messages:
940
- with st.chat_message(message["role"]):
941
- st.markdown(message["content"])
942
-
943
- elif tab_main == "๐Ÿ“ธ Media Gallery":
944
- create_media_gallery()
945
-
946
- elif tab_main == "๐Ÿ” Search ArXiv":
947
- query = st.text_input("Enter your research query:")
948
- if query:
949
- with st.spinner("Searching ArXiv..."):
950
- results = search_arxiv(query)
951
- st.markdown(results)
952
-
953
- elif tab_main == "๐Ÿ“ File Editor":
954
- if hasattr(st.session_state, 'current_file'):
955
- st.subheader(f"Editing: {st.session_state.current_file}")
956
- new_content = st.text_area("Content:", st.session_state.file_content, height=300)
957
- if st.button("Save Changes"):
958
- with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
959
- file.write(new_content)
960
- st.success("File updated successfully!")
961
-
962
- # Always show file manager in sidebar
963
- display_file_manager()
964
-
965
  # Helper Functions
966
  def generate_filename(prompt, file_type):
967
  central = pytz.timezone('US/Central')
@@ -1107,11 +1051,7 @@ def get_media_html(media_path, media_type="video", width="100%"):
1107
 
1108
  def main():
1109
  st.sidebar.markdown("### ๐ŸšฒBikeAI๐Ÿ† Claude and GPT Multi-Agent Research AI")
1110
-
1111
- # Initialize text input state if not exists
1112
- if 'current_text_input' not in st.session_state:
1113
- st.session_state.current_text_input = ""
1114
-
1115
  # Main navigation
1116
  tab_main = st.radio("Choose Action:",
1117
  ["๐ŸŽค Voice Input", "๐Ÿ’ฌ Chat", "๐Ÿ“ธ Media Gallery", "๐Ÿ” Search ArXiv", "๐Ÿ“ File Editor"],
@@ -1120,39 +1060,8 @@ def main():
1120
  if tab_main == "๐ŸŽค Voice Input":
1121
  st.subheader("Voice Recognition")
1122
 
1123
- # Display speech recognition component with modified JavaScript
1124
- speech_recognition_html_modified = speech_recognition_html.replace(
1125
- 'window.parent.postMessage({',
1126
- 'window.parent.postMessage({'
1127
- ).replace(
1128
- """if (finalTranscript) {
1129
- fullTranscript += finalTranscript;
1130
- // Send to Streamlit
1131
- window.parent.postMessage({
1132
- type: 'final_transcript',
1133
- text: finalTranscript
1134
- }, '*');
1135
- }""",
1136
- """if (finalTranscript) {
1137
- fullTranscript += finalTranscript;
1138
- // Send to Streamlit
1139
- window.parent.postMessage({
1140
- type: 'final_transcript',
1141
- text: finalTranscript,
1142
- updateInput: true
1143
- }, '*');
1144
- // Also update the text input area if it exists
1145
- const textArea = document.querySelector('textarea[aria-label="Message:"]');
1146
- if (textArea) {
1147
- textArea.value = finalTranscript;
1148
- // Trigger input event to update Streamlit
1149
- const event = new Event('input', { bubbles: true });
1150
- textArea.dispatchEvent(event);
1151
- }
1152
- }"""
1153
- )
1154
-
1155
- speech_component = st.components.v1.html(speech_recognition_html_modified, height=400)
1156
 
1157
  # Handle speech recognition output
1158
  if speech_component:
@@ -1163,8 +1072,6 @@ def main():
1163
  text = data.get('text', '').strip()
1164
  if text:
1165
  st.session_state.last_voice_input = text
1166
- # Update the current text input
1167
- st.session_state.current_text_input = text
1168
 
1169
  # Process voice input with AI
1170
  st.subheader("AI Response to Voice Input:")
@@ -1190,24 +1097,19 @@ def main():
1190
 
1191
  elif data.get('type') == 'clear_transcript':
1192
  st.session_state.last_voice_input = ""
1193
- st.session_state.current_text_input = ""
1194
- st.rerun()
1195
 
1196
  except Exception as e:
1197
  st.error(f"Error processing voice input: {e}")
1198
-
1199
-
1200
-
 
1201
 
1202
  # [Rest of the main function remains the same]
1203
  elif tab_main == "๐Ÿ’ฌ Chat":
1204
- # Use the current_text_input value in the text area
1205
- user_input = st.text_area("Message:", value=st.session_state.current_text_input, height=100)
1206
-
1207
- if st.button("Send ๐Ÿ“จ"):
1208
- if user_input:
1209
- # Process the input as before
1210
- pass
1211
 
1212
  elif tab_main == "๐Ÿ“ธ Media Gallery":
1213
  create_media_gallery()
 
660
  if st.button("๐Ÿ—‘", key=f"delete_{unique_id}"):
661
  os.remove(file)
662
  st.rerun()
663
+
664
+
665
+ def main():
666
+ st.sidebar.markdown("### ๐ŸšฒBikeAI๐Ÿ† Claude and GPT Multi-Agent Research AI")
667
+
668
+ # Main navigation
669
+ tab_main = st.radio("Choose Action:",
670
+ ["๐Ÿ’ฌ Chat", "๐Ÿ“ธ Media Gallery", "๐Ÿ” Search ArXiv", "๐Ÿ“ File Editor"],
671
+ horizontal=True)
672
+
673
+ if tab_main == "๐Ÿ’ฌ Chat":
674
+ # Model Selection
675
+ model_choice = st.sidebar.radio(
676
+ "Choose AI Model:",
677
+ ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
678
+ )
679
+
680
+ # Chat Interface
681
+ user_input = st.text_area("Message:", height=100)
682
+
683
+ if st.button("Send ๐Ÿ“จ"):
684
+ if user_input:
685
+ if model_choice == "GPT-4o":
686
+ gpt_response = process_with_gpt(user_input)
687
+ elif model_choice == "Claude-3":
688
+ claude_response = process_with_claude(user_input)
689
+ else: # Both
690
+ col1, col2, col3 = st.columns(3)
691
+ with col2:
692
+ st.subheader("Claude-3.5 Sonnet:")
693
+ try:
694
+ claude_response = process_with_claude(user_input)
695
+ except:
696
+ st.write('Claude 3.5 Sonnet out of tokens.')
697
+ with col1:
698
+ st.subheader("GPT-4o Omni:")
699
+ try:
700
+ gpt_response = process_with_gpt(user_input)
701
+ except:
702
+ st.write('GPT 4o out of tokens')
703
+ with col3:
704
+ st.subheader("Arxiv and Mistral Research:")
705
+ with st.spinner("Searching ArXiv..."):
706
+ #results = search_arxiv(user_input)
707
+ results = perform_ai_lookup(user_input)
708
+
709
+ st.markdown(results)
710
+
711
+ # Display Chat History
712
+ st.subheader("Chat History ๐Ÿ“œ")
713
+ tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
714
+
715
+ with tab1:
716
+ for chat in st.session_state.chat_history:
717
+ st.text_area("You:", chat["user"], height=100)
718
+ st.text_area("Claude:", chat["claude"], height=200)
719
+ st.markdown(chat["claude"])
720
+
721
+ with tab2:
722
+ for message in st.session_state.messages:
723
+ with st.chat_message(message["role"]):
724
+ st.markdown(message["content"])
725
+
726
+ elif tab_main == "๐Ÿ“ธ Media Gallery":
727
+ create_media_gallery()
728
+
729
+ elif tab_main == "๐Ÿ” Search ArXiv":
730
+ query = st.text_input("Enter your research query:")
731
+ if query:
732
+ with st.spinner("Searching ArXiv..."):
733
+ results = search_arxiv(query)
734
+ st.markdown(results)
735
+
736
+ elif tab_main == "๐Ÿ“ File Editor":
737
+ if hasattr(st.session_state, 'current_file'):
738
+ st.subheader(f"Editing: {st.session_state.current_file}")
739
+ new_content = st.text_area("Content:", st.session_state.file_content, height=300)
740
+ if st.button("Save Changes"):
741
+ with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
742
+ file.write(new_content)
743
+ st.success("File updated successfully!")
744
+
745
+ # Always show file manager in sidebar
746
+ display_file_manager()
747
+
748
+ if __name__ == "__main__":
749
+ main()
750
+
751
+ # Speech Recognition HTML Component
752
  speech_recognition_html = """
753
  <!DOCTYPE html>
754
  <html>
755
  <head>
756
+ <title>Continuous Speech Demo</title>
757
  <style>
758
  body {
759
  font-family: sans-serif;
 
797
  <div id="output"></div>
798
 
799
  <script>
 
 
800
  if (!('webkitSpeechRecognition' in window)) {
801
  alert('Speech recognition not supported');
802
  } else {
 
813
  recognition.continuous = true;
814
  recognition.interimResults = true;
815
 
 
 
 
 
 
 
 
 
 
816
  // Function to start recognition
817
  const startRecognition = () => {
818
  try {
 
843
  clearButton.onclick = () => {
844
  fullTranscript = '';
845
  output.textContent = '';
 
846
  window.parent.postMessage({
847
  type: 'clear_transcript',
848
  }, '*');
 
864
  if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
865
  if (finalTranscript) {
866
  fullTranscript += finalTranscript;
867
+ // Send to Streamlit
868
  window.parent.postMessage({
869
  type: 'final_transcript',
870
+ text: finalTranscript
 
871
  }, '*');
872
  }
873
  lastUpdateTime = Date.now();
 
906
  </html>
907
  """
908
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
909
  # Helper Functions
910
  def generate_filename(prompt, file_type):
911
  central = pytz.timezone('US/Central')
 
1051
 
1052
  def main():
1053
  st.sidebar.markdown("### ๐ŸšฒBikeAI๐Ÿ† Claude and GPT Multi-Agent Research AI")
1054
+
 
 
 
 
1055
  # Main navigation
1056
  tab_main = st.radio("Choose Action:",
1057
  ["๐ŸŽค Voice Input", "๐Ÿ’ฌ Chat", "๐Ÿ“ธ Media Gallery", "๐Ÿ” Search ArXiv", "๐Ÿ“ File Editor"],
 
1060
  if tab_main == "๐ŸŽค Voice Input":
1061
  st.subheader("Voice Recognition")
1062
 
1063
+ # Display speech recognition component
1064
+ speech_component = st.components.v1.html(speech_recognition_html, height=400)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1065
 
1066
  # Handle speech recognition output
1067
  if speech_component:
 
1072
  text = data.get('text', '').strip()
1073
  if text:
1074
  st.session_state.last_voice_input = text
 
 
1075
 
1076
  # Process voice input with AI
1077
  st.subheader("AI Response to Voice Input:")
 
1097
 
1098
  elif data.get('type') == 'clear_transcript':
1099
  st.session_state.last_voice_input = ""
1100
+ st.experimental_rerun()
 
1101
 
1102
  except Exception as e:
1103
  st.error(f"Error processing voice input: {e}")
1104
+
1105
+ # Display last voice input
1106
+ if st.session_state.last_voice_input:
1107
+ st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
1108
 
1109
  # [Rest of the main function remains the same]
1110
  elif tab_main == "๐Ÿ’ฌ Chat":
1111
+ # [Previous chat interface code]
1112
+ pass
 
 
 
 
 
1113
 
1114
  elif tab_main == "๐Ÿ“ธ Media Gallery":
1115
  create_media_gallery()