awacke1 committed on
Commit
cfa51fe
·
verified ·
1 Parent(s): 9fa0eae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +148 -168
app.py CHANGED
@@ -660,176 +660,12 @@ def display_file_manager():
660
  if st.button("πŸ—‘", key=f"delete_{unique_id}"):
661
  os.remove(file)
662
  st.rerun()
663
-
664
- def main():
665
- st.sidebar.markdown("### 🚲BikeAIπŸ† Claude and GPT Multi-Agent Research AI")
666
-
667
- # Initialize text input state if not exists
668
- if 'current_text_input' not in st.session_state:
669
- st.session_state.current_text_input = ""
670
-
671
- # Main navigation
672
- tab_main = st.radio("Choose Action:",
673
- ["🎀 Voice Input", "πŸ’¬ Chat", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"],
674
- horizontal=True)
675
-
676
- if tab_main == "🎀 Voice Input":
677
- st.subheader("Voice Recognition")
678
-
679
- # Display speech recognition component with modified JavaScript
680
- speech_recognition_html_modified = speech_recognition_html.replace(
681
- 'window.parent.postMessage({',
682
- 'window.parent.postMessage({'
683
- ).replace(
684
- """if (finalTranscript) {
685
- fullTranscript += finalTranscript;
686
- // Send to Streamlit
687
- window.parent.postMessage({
688
- type: 'final_transcript',
689
- text: finalTranscript
690
- }, '*');
691
- }""",
692
- """if (finalTranscript) {
693
- fullTranscript += finalTranscript;
694
- // Send to Streamlit
695
- window.parent.postMessage({
696
- type: 'final_transcript',
697
- text: finalTranscript,
698
- updateInput: true
699
- }, '*');
700
- // Also update the text input area if it exists
701
- const textArea = document.querySelector('textarea[aria-label="Message:"]');
702
- if (textArea) {
703
- textArea.value = finalTranscript;
704
- // Trigger input event to update Streamlit
705
- const event = new Event('input', { bubbles: true });
706
- textArea.dispatchEvent(event);
707
- }
708
- }"""
709
- )
710
-
711
- speech_component = st.components.v1.html(speech_recognition_html_modified, height=400)
712
-
713
- # Handle speech recognition output
714
- if speech_component:
715
- try:
716
- data = speech_component
717
- if isinstance(data, dict):
718
- if data.get('type') == 'final_transcript':
719
- text = data.get('text', '').strip()
720
- if text:
721
- st.session_state.last_voice_input = text
722
- # Update the current text input
723
- st.session_state.current_text_input = text
724
-
725
- # Process voice input with AI
726
- st.subheader("AI Response to Voice Input:")
727
-
728
- col1, col2, col3 = st.columns(3)
729
- with col2:
730
- st.write("Claude-3.5 Sonnet:")
731
- try:
732
- claude_response = process_with_claude(text)
733
- except:
734
- st.write('Claude 3.5 Sonnet out of tokens.')
735
- with col1:
736
- st.write("GPT-4o Omni:")
737
- try:
738
- gpt_response = process_with_gpt(text)
739
- except:
740
- st.write('GPT 4o out of tokens')
741
- with col3:
742
- st.write("Arxiv and Mistral Research:")
743
- with st.spinner("Searching ArXiv..."):
744
- results = perform_ai_lookup(text)
745
- st.markdown(results)
746
-
747
- elif data.get('type') == 'clear_transcript':
748
- st.session_state.last_voice_input = ""
749
- st.session_state.current_text_input = ""
750
- st.experimental_rerun()
751
-
752
- except Exception as e:
753
- st.error(f"Error processing voice input: {e}")
754
-
755
- elif tab_main == "πŸ’¬ Chat":
756
- # Use the current_text_input value in the text area
757
- user_input = st.text_area("Message:", value=st.session_state.current_text_input, height=100)
758
-
759
- if st.button("Send πŸ“¨"):
760
- if user_input:
761
- if model_choice == "GPT-4o":
762
- gpt_response = process_with_gpt(user_input)
763
- elif model_choice == "Claude-3":
764
- claude_response = process_with_claude(user_input)
765
- else: # Both
766
- col1, col2, col3 = st.columns(3)
767
- with col2:
768
- st.subheader("Claude-3.5 Sonnet:")
769
- try:
770
- claude_response = process_with_claude(user_input)
771
- except:
772
- st.write('Claude 3.5 Sonnet out of tokens.')
773
- with col1:
774
- st.subheader("GPT-4o Omni:")
775
- try:
776
- gpt_response = process_with_gpt(user_input)
777
- except:
778
- st.write('GPT 4o out of tokens')
779
- with col3:
780
- st.subheader("Arxiv and Mistral Research:")
781
- with st.spinner("Searching ArXiv..."):
782
- #results = search_arxiv(user_input)
783
- results = perform_ai_lookup(user_input)
784
-
785
- st.markdown(results)
786
-
787
- # Display Chat History
788
- st.subheader("Chat History πŸ“œ")
789
- tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
790
-
791
- with tab1:
792
- for chat in st.session_state.chat_history:
793
- st.text_area("You:", chat["user"], height=100)
794
- st.text_area("Claude:", chat["claude"], height=200)
795
- st.markdown(chat["claude"])
796
-
797
- with tab2:
798
- for message in st.session_state.messages:
799
- with st.chat_message(message["role"]):
800
- st.markdown(message["content"])
801
-
802
- elif tab_main == "πŸ“Έ Media Gallery":
803
- create_media_gallery()
804
-
805
- elif tab_main == "πŸ” Search ArXiv":
806
- query = st.text_input("Enter your research query:")
807
- if query:
808
- with st.spinner("Searching ArXiv..."):
809
- results = search_arxiv(query)
810
- st.markdown(results)
811
-
812
- elif tab_main == "πŸ“ File Editor":
813
- if hasattr(st.session_state, 'current_file'):
814
- st.subheader(f"Editing: {st.session_state.current_file}")
815
- new_content = st.text_area("Content:", st.session_state.file_content, height=300)
816
- if st.button("Save Changes"):
817
- with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
818
- file.write(new_content)
819
- st.success("File updated successfully!")
820
-
821
- # Always show file manager in sidebar
822
- display_file_manager()
823
-
824
- if __name__ == "__main__":
825
- main()
826
-
827
- # Speech Recognition HTML Component
828
  speech_recognition_html = """
829
  <!DOCTYPE html>
830
  <html>
831
  <head>
832
- <title>Continuous Speech Demo</title>
833
  <style>
834
  body {
835
  font-family: sans-serif;
@@ -873,6 +709,8 @@ speech_recognition_html = """
873
  <div id="output"></div>
874
 
875
  <script>
 
 
876
  if (!('webkitSpeechRecognition' in window)) {
877
  alert('Speech recognition not supported');
878
  } else {
@@ -889,6 +727,15 @@ speech_recognition_html = """
889
  recognition.continuous = true;
890
  recognition.interimResults = true;
891
 
 
 
 
 
 
 
 
 
 
892
  // Function to start recognition
893
  const startRecognition = () => {
894
  try {
@@ -919,6 +766,7 @@ speech_recognition_html = """
919
  clearButton.onclick = () => {
920
  fullTranscript = '';
921
  output.textContent = '';
 
922
  window.parent.postMessage({
923
  type: 'clear_transcript',
924
  }, '*');
@@ -940,10 +788,11 @@ speech_recognition_html = """
940
  if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
941
  if (finalTranscript) {
942
  fullTranscript += finalTranscript;
943
- // Send to Streamlit
944
  window.parent.postMessage({
945
  type: 'final_transcript',
946
- text: finalTranscript
 
947
  }, '*');
948
  }
949
  lastUpdateTime = Date.now();
@@ -982,6 +831,137 @@ speech_recognition_html = """
982
  </html>
983
  """
984
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
985
  # Helper Functions
986
  def generate_filename(prompt, file_type):
987
  central = pytz.timezone('US/Central')
 
660
  if st.button("πŸ—‘", key=f"delete_{unique_id}"):
661
  os.remove(file)
662
  st.rerun()
663
+ # First, define the speech recognition HTML template at the global scope, before the main() function
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
664
  speech_recognition_html = """
665
  <!DOCTYPE html>
666
  <html>
667
  <head>
668
+ <title>Voice Recognition</title>
669
  <style>
670
  body {
671
  font-family: sans-serif;
 
709
  <div id="output"></div>
710
 
711
  <script>
712
+ let messageTarget = 'textarea[aria-label="Message:"]';
713
+
714
  if (!('webkitSpeechRecognition' in window)) {
715
  alert('Speech recognition not supported');
716
  } else {
 
727
  recognition.continuous = true;
728
  recognition.interimResults = true;
729
 
730
+ const updateTextArea = (text) => {
731
+ const textArea = document.querySelector(messageTarget);
732
+ if (textArea) {
733
+ textArea.value = text;
734
+ const event = new Event('input', { bubbles: true });
735
+ textArea.dispatchEvent(event);
736
+ }
737
+ };
738
+
739
  // Function to start recognition
740
  const startRecognition = () => {
741
  try {
 
766
  clearButton.onclick = () => {
767
  fullTranscript = '';
768
  output.textContent = '';
769
+ updateTextArea('');
770
  window.parent.postMessage({
771
  type: 'clear_transcript',
772
  }, '*');
 
788
  if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
789
  if (finalTranscript) {
790
  fullTranscript += finalTranscript;
791
+ updateTextArea(fullTranscript);
792
  window.parent.postMessage({
793
  type: 'final_transcript',
794
+ text: finalTranscript,
795
+ updateInput: true
796
  }, '*');
797
  }
798
  lastUpdateTime = Date.now();
 
831
  </html>
832
  """
833
 
834
+ def main():
835
+ st.sidebar.markdown("### 🚲BikeAIπŸ† Claude and GPT Multi-Agent Research AI")
836
+
837
+ # Initialize text input state if not exists
838
+ if 'current_text_input' not in st.session_state:
839
+ st.session_state.current_text_input = ""
840
+
841
+ # Main navigation
842
+ tab_main = st.radio("Choose Action:",
843
+ ["🎀 Voice Input", "πŸ’¬ Chat", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"],
844
+ horizontal=True)
845
+
846
+ if tab_main == "🎀 Voice Input":
847
+ st.subheader("Voice Recognition")
848
+
849
+ # Display speech recognition component
850
+ speech_component = st.components.v1.html(speech_recognition_html, height=400)
851
+
852
+ # Handle speech recognition output
853
+ if speech_component:
854
+ try:
855
+ data = speech_component
856
+ if isinstance(data, dict) and data.get('type') == 'final_transcript':
857
+ text = data.get('text', '').strip()
858
+ if text:
859
+ st.session_state.last_voice_input = text
860
+ st.session_state.current_text_input = text
861
+
862
+ # Process voice input with AI
863
+ st.subheader("AI Response to Voice Input:")
864
+
865
+ col1, col2, col3 = st.columns(3)
866
+ with col2:
867
+ st.write("Claude-3.5 Sonnet:")
868
+ try:
869
+ claude_response = process_with_claude(text)
870
+ except:
871
+ st.write('Claude 3.5 Sonnet out of tokens.')
872
+ with col1:
873
+ st.write("GPT-4o Omni:")
874
+ try:
875
+ gpt_response = process_with_gpt(text)
876
+ except:
877
+ st.write('GPT 4o out of tokens')
878
+ with col3:
879
+ st.write("Arxiv and Mistral Research:")
880
+ with st.spinner("Searching ArXiv..."):
881
+ results = perform_ai_lookup(text)
882
+ st.markdown(results)
883
+
884
+ elif isinstance(data, dict) and data.get('type') == 'clear_transcript':
885
+ st.session_state.last_voice_input = ""
886
+ st.session_state.current_text_input = ""
887
+ st.experimental_rerun()
888
+
889
+ except Exception as e:
890
+ st.error(f"Error processing voice input: {e}")
891
+
892
+ elif tab_main == "πŸ’¬ Chat":
893
+ # Model Selection
894
+ model_choice = st.sidebar.radio(
895
+ "Choose AI Model:",
896
+ ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
897
+ )
898
+
899
+ # Use the current_text_input value in the text area
900
+ user_input = st.text_area("Message:", value=st.session_state.current_text_input, height=100)
901
+
902
+ if st.button("Send πŸ“¨"):
903
+ if user_input:
904
+ if model_choice == "GPT-4o":
905
+ gpt_response = process_with_gpt(user_input)
906
+ elif model_choice == "Claude-3":
907
+ claude_response = process_with_claude(user_input)
908
+ else: # Both
909
+ col1, col2, col3 = st.columns(3)
910
+ with col2:
911
+ st.subheader("Claude-3.5 Sonnet:")
912
+ try:
913
+ claude_response = process_with_claude(user_input)
914
+ except:
915
+ st.write('Claude 3.5 Sonnet out of tokens.')
916
+ with col1:
917
+ st.subheader("GPT-4o Omni:")
918
+ try:
919
+ gpt_response = process_with_gpt(user_input)
920
+ except:
921
+ st.write('GPT 4o out of tokens')
922
+ with col3:
923
+ st.subheader("Arxiv and Mistral Research:")
924
+ with st.spinner("Searching ArXiv..."):
925
+ results = perform_ai_lookup(user_input)
926
+ st.markdown(results)
927
+
928
+ # Display Chat History
929
+ st.subheader("Chat History πŸ“œ")
930
+ tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
931
+
932
+ with tab1:
933
+ for chat in st.session_state.chat_history:
934
+ st.text_area("You:", chat["user"], height=100)
935
+ st.text_area("Claude:", chat["claude"], height=200)
936
+ st.markdown(chat["claude"])
937
+
938
+ with tab2:
939
+ for message in st.session_state.messages:
940
+ with st.chat_message(message["role"]):
941
+ st.markdown(message["content"])
942
+
943
+ elif tab_main == "πŸ“Έ Media Gallery":
944
+ create_media_gallery()
945
+
946
+ elif tab_main == "πŸ” Search ArXiv":
947
+ query = st.text_input("Enter your research query:")
948
+ if query:
949
+ with st.spinner("Searching ArXiv..."):
950
+ results = search_arxiv(query)
951
+ st.markdown(results)
952
+
953
+ elif tab_main == "πŸ“ File Editor":
954
+ if hasattr(st.session_state, 'current_file'):
955
+ st.subheader(f"Editing: {st.session_state.current_file}")
956
+ new_content = st.text_area("Content:", st.session_state.file_content, height=300)
957
+ if st.button("Save Changes"):
958
+ with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
959
+ file.write(new_content)
960
+ st.success("File updated successfully!")
961
+
962
+ # Always show file manager in sidebar
963
+ display_file_manager()
964
+
965
  # Helper Functions
966
  def generate_filename(prompt, file_type):
967
  central = pytz.timezone('US/Central')