awacke1 committed on
Commit
611aff6
·
verified ·
1 Parent(s): b75652c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -205
app.py CHANGED
@@ -31,25 +31,35 @@ from urllib.parse import quote
31
  from xml.etree import ElementTree as ET
32
  from openai import OpenAI
33
 
34
- # 1. Configuration and Setup
35
- Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
36
- title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
37
- helpURL = 'https://huggingface.co/awacke1'
38
- bugURL = 'https://huggingface.co/spaces/awacke1'
39
- icons = '🚲🏆'
40
-
41
  st.set_page_config(
42
- page_title=title,
43
- page_icon=icons,
44
  layout="wide",
45
  initial_sidebar_state="auto",
46
  menu_items={
47
- 'Get Help': helpURL,
48
- 'Report a bug': bugURL,
49
- 'About': title
50
  }
51
  )
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  # 2. Load environment variables and initialize clients
54
  load_dotenv()
55
 
@@ -141,58 +151,6 @@ st.markdown("""
141
  """, unsafe_allow_html=True)
142
 
143
 
144
- # Bike Collections
145
- bike_collections = {
146
- "Celestial Collection 🌌": {
147
- "Eclipse Vaulter": {
148
- "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
149
- The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
150
- Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
151
- Camera angle: Low angle, wide shot
152
- Lighting: Dramatic rim lighting from eclipse
153
- Color palette: Deep purples, cosmic blues, corona gold""",
154
- "emoji": "🌑"
155
- },
156
- "Starlight Leaper": {
157
- "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
158
- Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
159
- Camera angle: Wide-angle upward shot
160
- Lighting: Natural starlight with subtle rim lighting
161
- Color palette: Deep blues, silver highlights, cosmic purples""",
162
- "emoji": "✨"
163
- },
164
- "Moonlit Hopper": {
165
- "prompt": """A sleek black bike mid-hop over a moonlit meadow,
166
- the full moon illuminating the misty surroundings. Fireflies dance around the bike,
167
- and soft shadows create a serene yet dynamic atmosphere.
168
- Camera angle: Side profile with slight low angle
169
- Lighting: Soft moonlight with atmospheric fog
170
- Color palette: Silver blues, soft whites, deep shadows""",
171
- "emoji": "🌙"
172
- }
173
- },
174
- "Nature-Inspired Collection 🌲": {
175
- "Shadow Grasshopper": {
176
- "prompt": """A black bike jumping between forest paths,
177
- with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
178
- as it soars above mossy logs.
179
- Camera angle: Through-the-trees tracking shot
180
- Lighting: Natural forest lighting with sun rays
181
- Color palette: Forest greens, golden sunlight, deep shadows""",
182
- "emoji": "🦗"
183
- },
184
- "Onyx Leapfrog": {
185
- "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
186
- the reflection on the water broken into ripples by the leap. The surrounding forest
187
- is vibrant with greens and browns.
188
- Camera angle: Low angle from water level
189
- Lighting: Golden hour side lighting
190
- Color palette: Deep blacks, water blues, forest greens""",
191
- "emoji": "🐸"
192
- }
193
- }
194
- }
195
-
196
 
197
  # Helper Functions
198
  def generate_filename(prompt, file_type):
@@ -747,165 +705,103 @@ def main():
747
 
748
  if __name__ == "__main__":
749
  main()
750
-
751
- # Speech Recognition HTML Component
752
  speech_recognition_html = """
753
  <!DOCTYPE html>
754
  <html>
755
  <head>
756
- <title>Continuous Speech Demo</title>
757
  <style>
758
- body {
759
- font-family: sans-serif;
760
- padding: 20px;
761
- max-width: 800px;
762
- margin: 0 auto;
763
- }
764
- button {
765
- padding: 10px 20px;
766
- margin: 10px 5px;
767
- font-size: 16px;
768
- }
769
- #status {
770
- margin: 10px 0;
771
- padding: 10px;
772
- background: #e8f5e9;
773
- border-radius: 4px;
774
- }
775
- #output {
776
- white-space: pre-wrap;
777
- padding: 15px;
778
- background: #f5f5f5;
779
- border-radius: 4px;
780
- margin: 10px 0;
781
- min-height: 100px;
782
- max-height: 400px;
783
- overflow-y: auto;
784
- }
785
- .controls {
786
- margin: 10px 0;
787
- }
788
  </style>
789
  </head>
790
  <body>
791
- <div class="controls">
792
- <button id="start">Start Listening</button>
793
- <button id="stop" disabled>Stop Listening</button>
794
- <button id="clear">Clear Text</button>
 
795
  </div>
796
- <div id="status">Ready</div>
797
  <div id="output"></div>
798
 
799
  <script>
800
- if (!('webkitSpeechRecognition' in window)) {
801
- alert('Speech recognition not supported');
802
- } else {
803
- const recognition = new webkitSpeechRecognition();
804
- const startButton = document.getElementById('start');
805
- const stopButton = document.getElementById('stop');
806
- const clearButton = document.getElementById('clear');
807
- const status = document.getElementById('status');
808
- const output = document.getElementById('output');
809
- let fullTranscript = '';
810
- let lastUpdateTime = Date.now();
811
-
812
- // Configure recognition
813
- recognition.continuous = true;
814
- recognition.interimResults = true;
815
-
816
- // Function to start recognition
817
- const startRecognition = () => {
818
- try {
819
- recognition.start();
820
- status.textContent = 'Listening...';
821
- startButton.disabled = true;
822
- stopButton.disabled = false;
823
- } catch (e) {
824
- console.error(e);
825
- status.textContent = 'Error: ' + e.message;
 
 
 
 
 
 
 
 
 
 
826
  }
827
- };
828
-
829
- // Auto-start on load
830
- window.addEventListener('load', () => {
831
- setTimeout(startRecognition, 1000);
832
- });
833
-
834
- startButton.onclick = startRecognition;
835
-
836
- stopButton.onclick = () => {
837
- recognition.stop();
838
- status.textContent = 'Stopped';
839
- startButton.disabled = false;
840
- stopButton.disabled = true;
841
- };
842
-
843
- clearButton.onclick = () => {
844
- fullTranscript = '';
845
- output.textContent = '';
846
- window.parent.postMessage({
847
- type: 'clear_transcript',
848
- }, '*');
849
- };
850
-
851
- recognition.onresult = (event) => {
852
- let interimTranscript = '';
853
- let finalTranscript = '';
854
-
855
- for (let i = event.resultIndex; i < event.results.length; i++) {
856
- const transcript = event.results[i][0].transcript;
857
- if (event.results[i].isFinal) {
858
- finalTranscript += transcript + '\\n';
859
- } else {
860
- interimTranscript += transcript;
861
- }
862
- }
863
-
864
- if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
865
- if (finalTranscript) {
866
- fullTranscript += finalTranscript;
867
- // Send to Streamlit
868
- window.parent.postMessage({
869
- type: 'final_transcript',
870
- text: finalTranscript
871
- }, '*');
872
- }
873
- lastUpdateTime = Date.now();
874
- }
875
-
876
- output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
877
- output.scrollTop = output.scrollHeight;
878
- };
879
-
880
- recognition.onend = () => {
881
- if (!stopButton.disabled) {
882
- try {
883
- recognition.start();
884
- console.log('Restarted recognition');
885
- } catch (e) {
886
- console.error('Failed to restart recognition:', e);
887
- status.textContent = 'Error restarting: ' + e.message;
888
- startButton.disabled = false;
889
- stopButton.disabled = true;
890
- }
891
- }
892
- };
893
-
894
- recognition.onerror = (event) => {
895
- console.error('Recognition error:', event.error);
896
- status.textContent = 'Error: ' + event.error;
897
-
898
- if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
899
- startButton.disabled = false;
900
- stopButton.disabled = true;
901
- }
902
- };
903
- }
904
  </script>
905
  </body>
906
  </html>
907
  """
908
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
909
  # Helper Functions
910
  def generate_filename(prompt, file_type):
911
  central = pytz.timezone('US/Central')
 
31
  from xml.etree import ElementTree as ET
32
  from openai import OpenAI
33
 
34
+ # Page Configuration
 
 
 
 
 
 
35
  st.set_page_config(
36
+ page_title="🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI",
37
+ page_icon="🚲🏆",
38
  layout="wide",
39
  initial_sidebar_state="auto",
40
  menu_items={
41
+ 'Get Help': 'https://huggingface.co/awacke1',
42
+ 'Report a bug': 'https://huggingface.co/spaces/awacke1',
43
+ 'About': "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
44
  }
45
  )
46
 
47
+ # Initialize Session States
48
+ if 'transcript' not in st.session_state:
49
+ st.session_state.transcript = ""
50
+ if "messages" not in st.session_state:
51
+ st.session_state.messages = []
52
+ if "chat_history" not in st.session_state:
53
+ st.session_state.chat_history = []
54
+
55
+
56
+ # Function to Process Voice Input
57
+ def process_transcript(transcript):
58
+ if transcript:
59
+ st.session_state.transcript += transcript
60
+ st.markdown(f"### Updated Transcript:\n{st.session_state.transcript}")
61
+
62
+
63
  # 2. Load environment variables and initialize clients
64
  load_dotenv()
65
 
 
151
  """, unsafe_allow_html=True)
152
 
153
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
  # Helper Functions
156
  def generate_filename(prompt, file_type):
 
705
 
706
  if __name__ == "__main__":
707
  main()
708
+ # Speech Recognition HTML
 
709
  speech_recognition_html = """
710
  <!DOCTYPE html>
711
  <html>
712
  <head>
713
+ <title>Speech Recognition</title>
714
  <style>
715
+ body { font-family: Arial, sans-serif; padding: 20px; }
716
+ #output { padding: 10px; background: #f1f1f1; margin-top: 10px; border-radius: 5px; }
717
+ button { margin: 5px; padding: 10px 15px; font-size: 14px; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
718
  </style>
719
  </head>
720
  <body>
721
+ <h1>🎤 Continuous Speech Recognition</h1>
722
+ <div>
723
+ <button id="start">Start</button>
724
+ <button id="stop" disabled>Stop</button>
725
+ <button id="clear">Clear</button>
726
  </div>
 
727
  <div id="output"></div>
728
 
729
  <script>
730
+ const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
731
+ recognition.continuous = true;
732
+ recognition.interimResults = true;
733
+
734
+ let finalTranscript = '';
735
+ const startButton = document.getElementById('start');
736
+ const stopButton = document.getElementById('stop');
737
+ const clearButton = document.getElementById('clear');
738
+ const output = document.getElementById('output');
739
+
740
+ startButton.onclick = () => {
741
+ recognition.start();
742
+ startButton.disabled = true;
743
+ stopButton.disabled = false;
744
+ };
745
+
746
+ stopButton.onclick = () => {
747
+ recognition.stop();
748
+ startButton.disabled = false;
749
+ stopButton.disabled = true;
750
+ };
751
+
752
+ clearButton.onclick = () => {
753
+ finalTranscript = '';
754
+ output.textContent = '';
755
+ window.parent.postMessage({ type: 'clear_transcript' }, '*');
756
+ };
757
+
758
+ recognition.onresult = (event) => {
759
+ let interimTranscript = '';
760
+ for (let i = event.resultIndex; i < event.results.length; i++) {
761
+ const transcript = event.results[i][0].transcript;
762
+ if (event.results[i].isFinal) {
763
+ finalTranscript += transcript + '\\n';
764
+ } else {
765
+ interimTranscript += transcript;
766
  }
767
+ }
768
+ output.textContent = finalTranscript + interimTranscript;
769
+ window.parent.postMessage({ type: 'final_transcript', text: finalTranscript }, '*');
770
+ };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
771
  </script>
772
  </body>
773
  </html>
774
  """
775
 
776
+ # Embed Speech Recognition HTML
777
+ st.components.v1.html(speech_recognition_html, height=400)
778
+
779
+ # Listen for Transcript Updates
780
+ if "last_message" in st.session_state:
781
+ try:
782
+ if st.session_state.last_message["type"] == "final_transcript":
783
+ process_transcript(st.session_state.last_message["text"])
784
+ elif st.session_state.last_message["type"] == "clear_transcript":
785
+ st.session_state.transcript = ""
786
+ st.experimental_rerun()
787
+ except KeyError:
788
+ pass
789
+
790
+ # File Management Functions
791
+ def generate_filename(prompt, file_type):
792
+ """Generate a unique filename."""
793
+ central = pytz.timezone('US/Central')
794
+ safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
795
+ safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
796
+ return f"{safe_date_time}_{safe_prompt}.{file_type}"
797
+
798
+ def create_file(filename, content):
799
+ """Save content to a file."""
800
+ with open(filename, 'w', encoding='utf-8') as f:
801
+ f.write(content)
802
+
803
+
804
+
805
  # Helper Functions
806
  def generate_filename(prompt, file_type):
807
  central = pytz.timezone('US/Central')