Commit 1f82605 · Parent(s): fbc8f04 · update

app.py CHANGED
@@ -129,11 +129,17 @@ with gr.Blocks(
         """
         # 🧠 TinyV - Answer Verification Tool
 
-        This tool verifies if a model-generated answer is semantically correct compared to a ground truth answer
+        This tool verifies if a model-generated answer is semantically correct compared to a ground truth answer,
+        even if there are minor differences in formatting or wording.
         """,
         elem_classes="title"
     )
 
+    # Define these variables first so they can be used by the example loading function
+    temperature = gr.State(value=0.3)
+    top_p = gr.State(value=0.95)
+    max_tokens = gr.State(value=1)
+
     # Main input area
     with gr.Row(equal_height=True):
         # Left column - Inputs
@@ -184,9 +190,14 @@ with gr.Blocks(
     # Advanced Settings (hidden at the bottom)
     with gr.Accordion("⚙️ Advanced Settings", open=False):
         with gr.Row():
-
-
-
+            temp_slider = gr.Slider(0, 1, value=0.3, step=0.1, label="Temperature")
+            top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
+            max_tokens_slider = gr.Slider(1, 128, value=1, step=1, label="Max Tokens")
+
+            # Connect sliders to state values
+            temp_slider.change(lambda x: x, inputs=[temp_slider], outputs=[temperature])
+            top_p_slider.change(lambda x: x, inputs=[top_p_slider], outputs=[top_p])
+            max_tokens_slider.change(lambda x: x, inputs=[max_tokens_slider], outputs=[max_tokens])
 
     # About section
     with gr.Accordion("ℹ️ About This Tool", open=False):
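For context, the pattern this commit introduces — sliders inside an "⚙️ Advanced Settings" accordion mirrored into gr.State values so other callbacks can read the current sampling parameters — can be sketched as a small standalone Gradio app. This is a minimal illustration assuming only what the diff shows, not the Space's actual app.py; the verify_answer callback and the question/answer/result components named in the trailing comment are hypothetical.

import gradio as gr

with gr.Blocks() as demo:
    # State objects hold the current sampling parameters between events,
    # defined up front so later callbacks (e.g. an example-loading function)
    # can reference them as inputs or outputs.
    temperature = gr.State(value=0.3)
    top_p = gr.State(value=0.95)
    max_tokens = gr.State(value=1)

    with gr.Accordion("⚙️ Advanced Settings", open=False):
        with gr.Row():
            temp_slider = gr.Slider(0, 1, value=0.3, step=0.1, label="Temperature")
            top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
            max_tokens_slider = gr.Slider(1, 128, value=1, step=1, label="Max Tokens")

            # Each .change event copies the slider's new value into its State
            temp_slider.change(lambda x: x, inputs=[temp_slider], outputs=[temperature])
            top_p_slider.change(lambda x: x, inputs=[top_p_slider], outputs=[top_p])
            max_tokens_slider.change(lambda x: x, inputs=[max_tokens_slider], outputs=[max_tokens])

    # A verification callback would then read the States, e.g. (hypothetical):
    # verify_btn.click(verify_answer, inputs=[question, answer, temperature, top_p, max_tokens], outputs=[result])

if __name__ == "__main__":
    demo.launch()

Mirroring sliders into gr.State is not strictly required in Gradio, since slider components can be passed directly as event inputs; the added comment suggests the commit does it so the values exist before the example-loading function that references them is wired up.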