Spestly committed
Commit 66e319b · verified · 1 Parent(s): 1c4e5c1

Update app.py

Files changed (1)
  1. app.py +21 -24
app.py CHANGED
@@ -96,12 +96,25 @@ with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme) as demo:
     gr.Markdown("# 🚀 Athena Playground Chat")
     gr.Markdown("*Powered by HuggingFace ZeroGPU*")
 
-    # Placeholders for config components
-    model_choice = gr.State("Athena-R3X 8B")
-    max_length = gr.State(512)
-    temperature = gr.State(0.7)
+    # 1. Declare config components FIRST
+    model_choice = gr.Dropdown(
+        label="📱 Model",
+        choices=list(MODELS.keys()),
+        value="Athena-R3X 8B",
+        info="Select which Athena model to use"
+    )
+    max_length = gr.Slider(
+        32, 2048, value=512,
+        label="📏 Max Tokens",
+        info="Maximum number of tokens to generate"
+    )
+    temperature = gr.Slider(
+        0.1, 2.0, value=0.7,
+        label="🎨 Creativity",
+        info="Higher values = more creative responses"
+    )
 
-    # Main chat interface
+    # 2. Create the chat interface, passing the controls as additional_inputs
     chat_interface = gr.ChatInterface(
         fn=respond,
         additional_inputs=[model_choice, max_length, temperature],
@@ -124,26 +137,10 @@ with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme) as demo:
         type="messages"
     )
 
-    # Accordion at the bottom for configurations
+    # 3. Place the controls in an Accordion for display (they are still linked!)
     with gr.Accordion("Configurations", open=False):
-        model_choice = gr.Dropdown(
-            label="📱 Model",
-            choices=list(MODELS.keys()),
-            value="Athena-R3X 8B",
-            info="Select which Athena model to use"
-        )
-        max_length = gr.Slider(
-            32, 2048, value=512,
-            label="📏 Max Tokens",
-            info="Maximum number of tokens to generate"
-        )
-        temperature = gr.Slider(
-            0.1, 2.0, value=0.7,
-            label="🎨 Creativity",
-            info="Higher values = more creative responses"
-        )
-        # Link the config components to the chat interface's additional_inputs
-        chat_interface.additional_inputs = [model_choice, max_length, temperature]
+        gr.Markdown("### Change Model and Generation Settings")
+        gr.Row([model_choice, max_length, temperature])
 
 if __name__ == "__main__":
     demo.launch()
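
The structural change here is ordering: the Dropdown and Sliders are created before gr.ChatInterface, so real components (rather than gr.State placeholders) can be passed through additional_inputs, and their current values are appended to respond()'s arguments on every chat turn. The sketch below reduces that pattern to a minimal standalone app; it is illustrative only, with a placeholder MODELS dict and a stub respond() standing in for the Space's actual model registry and generation code.

# Minimal sketch of the declare-first / additional_inputs pattern (not the committed file).
import gradio as gr

MODELS = {"Athena-R3X 8B": "placeholder-model-id"}  # placeholder; the real MODELS maps to actual checkpoints

def respond(message, history, model_choice, max_length, temperature):
    # Stub generation: echo the settings so the wiring is visible.
    return f"[{model_choice} | max_tokens={max_length} | temp={temperature}] {message}"

with gr.Blocks(title="Athena Playground Chat") as demo:
    gr.Markdown("# 🚀 Athena Playground Chat")

    # 1. Declare the controls first so they already exist when ChatInterface is built.
    model_choice = gr.Dropdown(choices=list(MODELS.keys()), value="Athena-R3X 8B", label="📱 Model")
    max_length = gr.Slider(32, 2048, value=512, label="📏 Max Tokens")
    temperature = gr.Slider(0.1, 2.0, value=0.7, label="🎨 Creativity")

    # 2. Pass them as additional_inputs; their values are forwarded to respond() each turn.
    gr.ChatInterface(
        fn=respond,
        additional_inputs=[model_choice, max_length, temperature],
        type="messages",
    )

if __name__ == "__main__":
    demo.launch()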
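
One caveat on step 3: components instantiated directly under gr.Blocks render where they are created, and layout containers such as gr.Row are normally used as context managers, so the gr.Row([...]) call inside the Accordion may not relocate the controls visually. If the goal is a collapsible settings section below the chat, a variant worth considering (an assumption about recent Gradio releases, not what this commit does) is to create the controls with render=False and let ChatInterface place them in its own accordion via additional_inputs_accordion:

# Hedged variant of step 3: ChatInterface builds the collapsible section itself.
import gradio as gr

def respond(message, history, max_length, temperature):
    return f"[max_tokens={max_length} | temp={temperature}] {message}"  # stub

with gr.Blocks() as demo:
    # render=False keeps the sliders out of the page flow for now.
    max_length = gr.Slider(32, 2048, value=512, label="📏 Max Tokens", render=False)
    temperature = gr.Slider(0.1, 2.0, value=0.7, label="🎨 Creativity", render=False)

    gr.ChatInterface(
        fn=respond,
        additional_inputs=[max_length, temperature],
        # Unrendered additional inputs are displayed in an accordion with this label.
        additional_inputs_accordion="Configurations",
        type="messages",
    )

if __name__ == "__main__":
    demo.launch()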