Futuresony committed on
Commit
d40a7a5
·
verified ·
1 Parent(s): 3f65533

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -3
app.py CHANGED
@@ -6,7 +6,7 @@ For more information on `huggingface_hub` Inference API support, please check th
6
  """
7
  client = InferenceClient(model="Futuresony/future_ai_12_10_2024.gguf")
8
 
9
- # Set fixed parameters
10
  MAX_TOKENS = 512
11
  TEMPERATURE = 0.7
12
  TOP_P = 0.95
@@ -48,7 +48,14 @@ with gr.Blocks() as demo:
48
  label="System Prompt",
49
  placeholder="Enter system instructions here...",
50
  )
51
- user_message = gr.Textbox(label="Your Message", placeholder="Type your message...")
 
 
 
 
 
 
 
52
  send_button = gr.Button("Send")
53
 
54
  # Function to handle user inputs and display assistant responses
@@ -66,6 +73,38 @@ with gr.Blocks() as demo:
66
  outputs=[chatbot, state],
67
  )
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  if __name__ == "__main__":
70
  demo.launch()
71
-
 
6
  """
7
  client = InferenceClient(model="Futuresony/future_ai_12_10_2024.gguf")
8
 
9
+ # Fixed parameters
10
  MAX_TOKENS = 512
11
  TEMPERATURE = 0.7
12
  TOP_P = 0.95
 
48
  label="System Prompt",
49
  placeholder="Enter system instructions here...",
50
  )
51
+ with gr.Row():
52
+ user_message = gr.Textbox(
53
+ label="Your Message",
54
+ placeholder="Type your message or use the mic button...",
55
+ elem_id="user_input_box",
56
+ )
57
+ mic_button = gr.Button(value="🎤", elem_id="mic_button") # Add mic button
58
+
59
  send_button = gr.Button("Send")
60
 
61
  # Function to handle user inputs and display assistant responses
 
73
  outputs=[chatbot, state],
74
  )
75
 
76
+ # Custom JavaScript for Speech Recognition
77
+ speech_recognition_js = """
78
+ let micButton = document.getElementById('mic_button');
79
+ let userInputBox = document.getElementById('user_input_box');
80
+ let recognition = null;
81
+
82
+ if ('webkitSpeechRecognition' in window) {
83
+ recognition = new webkitSpeechRecognition();
84
+ recognition.continuous = false;
85
+ recognition.interimResults = false;
86
+
87
+ recognition.onresult = function(event) {
88
+ const transcript = event.results[0][0].transcript;
89
+ userInputBox.value = transcript;
90
+ };
91
+
92
+ recognition.onspeechend = function() {
93
+ recognition.stop(); // Stop listening once speech ends (onspeechend fires immediately; no silence timeout is configured)
94
+ };
95
+
96
+ micButton.onclick = function() {
97
+ recognition.start(); // Start listening when mic button is clicked
98
+ };
99
+ } else {
100
+ micButton.onclick = function() {
101
+ alert("Speech recognition is not supported in this browser.");
102
+ };
103
+ }
104
+ """
105
+
106
+ # Inject JavaScript into the app
107
+ demo.load(_js=speech_recognition_js)
108
+
109
  if __name__ == "__main__":
110
  demo.launch()