Futuresony committed
Commit 045101e · verified · 1 Parent(s): a71a723

Update app.py

Files changed (1)
  1. app.py +87 -77
app.py CHANGED
@@ -6,13 +6,15 @@ For more information on `huggingface_hub` Inference API support, please check th
 """
 client = InferenceClient(model="Futuresony/future_ai_12_10_2024.gguf")
 
-# Fixed parameters
-MAX_TOKENS = 512
-TEMPERATURE = 0.7
-TOP_P = 0.95
 
-
-def respond(message, history: list[tuple[str, str]], system_message):
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -27,10 +29,10 @@ def respond(message, history: list[tuple[str, str]], system_message):
 
     for message in client.chat_completion(
         messages,
-        max_tokens=MAX_TOKENS,
+        max_tokens=max_tokens,
         stream=True,
-        temperature=TEMPERATURE,
-        top_p=TOP_P,
+        temperature=temperature,
+        top_p=top_p,
     ):
         token = message.choices[0].delta.content
 
@@ -38,75 +40,83 @@ def respond(message, history: list[tuple[str, str]], system_message):
         yield response
 
 
-# Gradio interface setup
-with gr.Blocks() as demo:
-    # Chatbot Interface
-    chatbot = gr.Chatbot(type="messages")  # Use 'messages' format for compatibility
-    state = gr.State([])
-    system_message = gr.Textbox(
-        value="You are a helpful assistant.",
-        label="System Prompt",
-        placeholder="Enter system instructions here...",
-    )
+# JavaScript for speech recognition
+speech_recognition_js = """
+<script>
+    let micButton = document.getElementById('mic_button');
+    let userInputBox = document.getElementById('user_input');
+    let recognition = null;
+
+    if ('webkitSpeechRecognition' in window) {
+        recognition = new webkitSpeechRecognition();
+        recognition.continuous = false;
+        recognition.interimResults = false;
+
+        recognition.onresult = function(event) {
+            const transcript = event.results[0][0].transcript;
+            userInputBox.value = transcript;
+        };
+
+        recognition.onspeechend = function() {
+            recognition.stop(); // Stop listening after 2 seconds of silence
+        };
+
+        micButton.onclick = function() {
+            recognition.start(); // Start listening when mic button is clicked
+        };
+    } else {
+        micButton.onclick = function() {
+            alert("Speech recognition is not supported in this browser.");
+        };
+    }
+</script>
+"""
+
+# Gradio Chat Interface
+demo = gr.ChatInterface(
+    fn=respond,
+    additional_inputs=[
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
+    ],
+)
+
+# Add mic button to the interface
+with gr.Blocks() as ui:
     with gr.Row():
-        user_message = gr.Textbox(
-            label="Your Message",
-            placeholder="Type your message or use the mic button...",
-            elem_id="user_input_box",
+        gr.HTML(
+            """
+            <style>
+                #mic_button {
+                    font-family: 'Material Symbols Outlined';
+                    font-size: 24px;
+                    color: #555;
+                    border: none;
+                    background: none;
+                    cursor: pointer;
+                    padding: 4px;
+                    margin-right: 8px;
+                }
+                #user_input {
+                    width: 100%;
+                }
+            </style>
+            <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined">
+            <div style="display: flex; align-items: center;">
+                <button id="mic_button">🎤</button>
+                <input id="user_input" type="text" placeholder="Type your message here..." />
+            </div>
+            """
         )
-        mic_button = gr.Button(value="🎤", elem_id="mic_button")  # Add mic button
-
-    send_button = gr.Button("Send")
-
-    # Function to handle user inputs and display assistant responses
-    def chat(user_input, chat_history, sys_msg):
-        response_generator = respond(user_input, chat_history, sys_msg)
-        response = ""
-        for partial_response in response_generator:
-            response = partial_response
-        chat_history.append((user_input, response))
-        return chat_history, chat_history
-
-    send_button.click(
-        chat,
-        inputs=[user_message, state, system_message],
-        outputs=[chatbot, state],
-    )
-
-    # Inject JavaScript for Speech Recognition using Gradio's HTML component
-    gr.HTML(
-        """
-        <script>
-            let micButton = document.getElementById('mic_button');
-            let userInputBox = document.getElementById('user_input_box');
-            let recognition = null;
-
-            if ('webkitSpeechRecognition' in window) {
-                recognition = new webkitSpeechRecognition();
-                recognition.continuous = false;
-                recognition.interimResults = false;
-
-                recognition.onresult = function(event) {
-                    const transcript = event.results[0][0].transcript;
-                    userInputBox.value = transcript;
-                };
-
-                recognition.onspeechend = function() {
-                    recognition.stop(); // Stop listening after 2 seconds of silence
-                };
-
-                micButton.onclick = function() {
-                    recognition.start(); // Start listening when mic button is clicked
-                };
-            } else {
-                micButton.onclick = function() {
-                    alert("Speech recognition is not supported in this browser.");
-                };
-            }
-        </script>
-        """
-    )
-
-if __name__ == "__main__":
+    gr.HTML(speech_recognition_js)  # Inject JavaScript for mic button
+
     demo.launch()
 
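The hunks above show only the edges of `respond`; the history-handling middle of the function is unchanged context and is not displayed. As a point of reference, here is a minimal, self-contained sketch (not part of the commit) of how the updated generator could be driven directly, outside Gradio. It assumes the hidden middle follows the standard Hugging Face chat template (history passed as (user, assistant) tuples appended to `messages`, with the reply accumulated per streamed chunk) and that the endpoint behind Futuresony/future_ai_12_10_2024.gguf accepts chat-completion requests.

from huggingface_hub import InferenceClient

client = InferenceClient(model="Futuresony/future_ai_12_10_2024.gguf")


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build the chat transcript: system prompt, prior turns, then the new user message.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some streamed chunks carry no text
            response += token
        yield response  # yield the accumulated reply so a caller can render it incrementally


if __name__ == "__main__":
    # Stream a single reply and print only the newly generated part of each update.
    printed = 0
    for partial in respond("Hello!", [], "You are a friendly Chatbot.", 128, 0.7, 0.95):
        print(partial[printed:], end="", flush=True)
        printed = len(partial)
    print()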
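The `speech_recognition_js` string relies on webkitSpeechRecognition, a prefixed Web Speech API that is not available in every browser, and scripts embedded through gr.HTML are not guaranteed to execute in every Gradio version. Purely as an illustrative sketch (not the commit's approach), newer Gradio releases (4.x) expose a `head` parameter on gr.Blocks for injecting raw HTML/JS into the page head; the snippet below assumes such a version, uses the hypothetical names `mic_js` and `mic_demo`, and binds the click handler by event delegation so it does not depend on when Gradio renders the button.

import gradio as gr

# Hypothetical alternative wiring, assuming Gradio >= 4 where gr.Blocks accepts `head`.
mic_js = """
<script>
document.addEventListener('click', function (event) {
    if (!event.target || event.target.id !== 'mic_button') return;
    const userInputBox = document.getElementById('user_input');
    if ('webkitSpeechRecognition' in window) {
        const recognition = new webkitSpeechRecognition();
        recognition.continuous = false;      // stop after a single utterance
        recognition.interimResults = false;  // deliver only final results
        recognition.onresult = function (e) {
            userInputBox.value = e.results[0][0].transcript;
        };
        recognition.start();
    } else {
        alert('Speech recognition is not supported in this browser.');
    }
});
</script>
"""

with gr.Blocks(head=mic_js) as mic_demo:
    gr.HTML(
        """
        <div style="display: flex; align-items: center;">
            <button id="mic_button">🎤</button>
            <input id="user_input" type="text" placeholder="Type your message here..." />
        </div>
        """
    )

if __name__ == "__main__":
    mic_demo.launch()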