Spaces · Running on Zero
Commit · dd36917
1 Parent(s): 284a5ca
update

Files changed:
- app.py (+12 -91)
- app_no_config.py → app_config.py (+91 -12)
app.py
CHANGED
@@ -25,8 +25,6 @@ nltk.download("punkt", download_dir="/home/user/nltk_data")
 nltk.data.path.append("/home/user/nltk_data")
 from nltk.tokenize import sent_tokenize

-DEFAULT_TOP_K = 3
-
 # Load original app constants
 APP_TITLE = '<div class="app-title"><span class="brand">AttnTrace: </span><span class="subtitle">Attention-based Context Traceback for Long-Context LLMs</span></div>'
 APP_DESCRIPTION = """AttnTrace traces a model's generated statements back to specific parts of the context using attention-based traceback. Try it out with Meta-Llama-3.1-8B-Instruct here! See the [[paper](https://arxiv.org/abs/2506.04202)] and [[code](https://github.com/Wang-Yanting/TracLLM-Kit)] for more!
@@ -84,46 +82,10 @@ current_attr = None
 current_model_path = None
 current_explanation_level = None
 current_api_key = None
-current_top_k = 3  # Add top-k tracking
-
-def update_configuration(explanation_level, top_k):
-    """Update the global configuration and reinitialize attribution if needed"""
-    global current_explanation_level, current_top_k, current_attr, current_llm
-
-    # Convert top_k to int
-    top_k = int(top_k)
-
-    # Check if configuration has changed
-    config_changed = (current_explanation_level != explanation_level or
-                      current_top_k != top_k)
-
-    if config_changed:
-        print(f"🔄 Updating configuration: explanation_level={explanation_level}, top_k={top_k}")
-        current_explanation_level = explanation_level
-        current_top_k = top_k
-
-        DEFAULT_EXPLANATION_LEVEL = explanation_level
-        DEFAULT_TOP_K = top_k
-
-        # Reset both model and attribution to force complete reinitialization
-        current_llm = None
-        current_attr = None
-
-        # Reinitialize with new configuration
-        try:
-            llm, attr, error_msg = initialize_model_and_attr()
-            if llm is not None and attr is not None:
-                return gr.update(value=f"✅ Configuration updated: {explanation_level} level, top-{top_k}")
-            else:
-                return gr.update(value=f"❌ Error reinitializing: {error_msg}")
-        except Exception as e:
-            return gr.update(value=f"❌ Error updating configuration: {str(e)}")
-    else:
-        return gr.update(value="ℹ️ Configuration unchanged")

 def initialize_model_and_attr():
     """Initialize model and attribution with default configuration"""
-    global current_llm, current_attr, current_model_path, current_explanation_level, current_api_key, current_top_k
+    global current_llm, current_attr, current_model_path, current_explanation_level, current_api_key

     try:
         # Check if we need to reinitialize the model
@@ -133,7 +95,7 @@ def initialize_model_and_attr():

         # Check if we need to update attribution
         need_attr_update = (current_attr is None or
-                            current_explanation_level != (current_explanation_level or DEFAULT_EXPLANATION_LEVEL) or
+                            current_explanation_level != DEFAULT_EXPLANATION_LEVEL or
                             need_model_update)

         if need_model_update:
@@ -144,20 +106,15 @@ def initialize_model_and_attr():
             current_api_key = effective_api_key

         if need_attr_update:
-            # Use current configuration or defaults
-            explanation_level = current_explanation_level or DEFAULT_EXPLANATION_LEVEL
-            top_k = current_top_k or 3
-            if "segment" in DEFAULT_EXPLANATION_LEVEL:
-                DEFAULT_EXPLANATION_LEVEL = "segment"
-            print(f"Initializing context traceback with explanation level: {explanation_level}, top_k: {top_k}")
+            print(f"Initializing context traceback with explanation level: {DEFAULT_EXPLANATION_LEVEL}")
             current_attr = AttnTraceAttribution(
                 current_llm,
-                explanation_level= DEFAULT_EXPLANATION_LEVEL,
-                K=DEFAULT_TOP_K,
+                explanation_level=DEFAULT_EXPLANATION_LEVEL,
+                K=3,
                 q=0.4,
                 B=30
             )
-            current_explanation_level = explanation_level
+            current_explanation_level = DEFAULT_EXPLANATION_LEVEL

         return current_llm, current_attr, None

@@ -250,9 +207,9 @@ def generate_model_response(state: State):
         print("❌ Validation failed: No query")
         return state, gr.update(value=[("❌ Please enter a query before generating response! If you just changed configuration, try reloading an example.", None)], visible=True)

-    # Initialize model and attribution with current configuration
-    print(f"🔧 Generating response with explanation_level: {current_explanation_level or DEFAULT_EXPLANATION_LEVEL}, top_k: {current_top_k or 3}")
-    llm, attr, error_msg = initialize_model_and_attr()
+    # Initialize model and attribution with default configuration
+    print(f"🔧 Generating response with explanation_level: {DEFAULT_EXPLANATION_LEVEL}")
+    #llm, attr, error_msg = initialize_model_and_attr()

     if llm is None or attr is None:
         error_text = error_msg if error_msg else "Model initialization failed!"
@@ -381,7 +338,7 @@ def unified_response_handler(response_text: str, state: State):
     )

     # Initialize model and generate response
-    llm, attr, error_msg = initialize_model_and_attr()
+    #llm, attr, error_msg = initialize_model_and_attr()

     if llm is None:
         error_text = error_msg if error_msg else "Model initialization failed!"
@@ -479,7 +436,7 @@ def basic_get_scores_and_sources_full_response(state: State):
     state.explained_response_part = state.full_response

     # Attribution using default configuration
-    llm, attr, error_msg = initialize_model_and_attr()
+    #_, attr, error_msg = initialize_model_and_attr()

     if attr is None:
         error_text = error_msg if error_msg else "Traceback initialization failed!"
@@ -694,7 +651,7 @@ def basic_get_scores_and_sources(
     state.explained_response_part = selected_text

     # Attribution using default configuration
-    llm, attr, error_msg = initialize_model_and_attr()
+    #_, attr, error_msg = initialize_model_and_attr()

     if attr is None:
         error_text = error_msg if error_msg else "Traceback initialization failed!"
@@ -1000,36 +957,6 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
         '**Color Legend for Context Traceback (by ranking):** <span style="background-color: #FF4444; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Red</span> = 1st (most important) | <span style="background-color: #FF8C42; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Orange</span> = 2nd | <span style="background-color: #FFD93D; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Golden</span> = 3rd | <span style="background-color: #FFF280; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Yellow</span> = 4th-5th | <span style="background-color: #FFF9C4; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Light</span> = 6th+'
     )

-    # Configuration bar
-    with gr.Row():
-        with gr.Column(scale=1):
-            explanation_level_dropdown = gr.Dropdown(
-                choices=["sentence", "paragraph", "text segment"],
-                value="sentence",
-                label="Explanation Level",
-                info="How to segment the context for traceback analysis"
-            )
-        with gr.Column(scale=1):
-            top_k_dropdown = gr.Dropdown(
-                choices=["3", "5", "10"],
-                value="5",
-                label="Top-K Value",
-                info="Number of most important text segments to highlight"
-            )
-        with gr.Column(scale=1):
-            apply_config_button = gr.Button(
-                "Apply Configuration",
-                variant="secondary",
-                size="sm"
-            )
-        with gr.Column(scale=2):
-            config_status_text = gr.Textbox(
-                label="Configuration Status",
-                value="Ready to apply configuration",
-                interactive=False,
-                lines=1
-            )
-

     # Top section: Wide Context box with tabs
     with gr.Row():
@@ -1282,12 +1209,6 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
         outputs=[state, response_input_box, basic_response_box, basic_generate_error_box]
     )

-    # Configuration update handler
-    apply_config_button.click(
-        fn=update_configuration,
-        inputs=[explanation_level_dropdown, top_k_dropdown],
-        outputs=[config_status_text]
-    )

     # gr.Markdown(
     #     "Please do not interact with elements while generation/attribution is in progress. This may cause errors. You can refresh the page if you run into issues because of this."
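Aside: the hunks above work because initialize_model_and_attr() already uses a lazy re-initialization pattern — the model and attribution objects live in module-level globals and are rebuilt only when the requested configuration differs from the cached one, so dropping update_configuration() leaves generation untouched. A minimal, self-contained sketch of that caching pattern (build_attr and get_attr are illustrative stand-ins, not functions from the repo; the "sentence" default is assumed from the dropdown's value):

    # Sketch of the caching pattern relied on by initialize_model_and_attr() (illustrative only).
    current_attr = None                      # cached attribution object
    current_explanation_level = None         # configuration it was built with
    DEFAULT_EXPLANATION_LEVEL = "sentence"   # assumed default; defined elsewhere in app.py

    def build_attr(level):
        # Stand-in for AttnTraceAttribution(current_llm, explanation_level=level, K=3, q=0.4, B=30)
        return {"explanation_level": level, "K": 3, "q": 0.4, "B": 30}

    def get_attr(level=DEFAULT_EXPLANATION_LEVEL):
        """Return the cached attribution object, rebuilding it only when the level changes."""
        global current_attr, current_explanation_level
        if current_attr is None or current_explanation_level != level:
            current_attr = build_attr(level)
            current_explanation_level = level
        return current_attr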
app_no_config.py → app_config.py
RENAMED
@@ -25,6 +25,8 @@ nltk.download("punkt", download_dir="/home/user/nltk_data")
 nltk.data.path.append("/home/user/nltk_data")
 from nltk.tokenize import sent_tokenize

+DEFAULT_TOP_K = 3
+
 # Load original app constants
 APP_TITLE = '<div class="app-title"><span class="brand">AttnTrace: </span><span class="subtitle">Attention-based Context Traceback for Long-Context LLMs</span></div>'
 APP_DESCRIPTION = """AttnTrace traces a model's generated statements back to specific parts of the context using attention-based traceback. Try it out with Meta-Llama-3.1-8B-Instruct here! See the [[paper](https://arxiv.org/abs/2506.04202)] and [[code](https://github.com/Wang-Yanting/TracLLM-Kit)] for more!
@@ -82,10 +84,46 @@ current_attr = None
 current_model_path = None
 current_explanation_level = None
 current_api_key = None
+current_top_k = 3  # Add top-k tracking
+
+def update_configuration(explanation_level, top_k):
+    """Update the global configuration and reinitialize attribution if needed"""
+    global current_explanation_level, current_top_k, current_attr, current_llm
+
+    # Convert top_k to int
+    top_k = int(top_k)
+
+    # Check if configuration has changed
+    config_changed = (current_explanation_level != explanation_level or
+                      current_top_k != top_k)
+
+    if config_changed:
+        print(f"🔄 Updating configuration: explanation_level={explanation_level}, top_k={top_k}")
+        current_explanation_level = explanation_level
+        current_top_k = top_k
+
+        DEFAULT_EXPLANATION_LEVEL = explanation_level
+        DEFAULT_TOP_K = top_k
+
+        # Reset both model and attribution to force complete reinitialization
+        current_llm = None
+        current_attr = None
+
+        # Reinitialize with new configuration
+        try:
+            llm, attr, error_msg = initialize_model_and_attr()
+            if llm is not None and attr is not None:
+                return gr.update(value=f"✅ Configuration updated: {explanation_level} level, top-{top_k}")
+            else:
+                return gr.update(value=f"❌ Error reinitializing: {error_msg}")
+        except Exception as e:
+            return gr.update(value=f"❌ Error updating configuration: {str(e)}")
+    else:
+        return gr.update(value="ℹ️ Configuration unchanged")

 def initialize_model_and_attr():
     """Initialize model and attribution with default configuration"""
-    global current_llm, current_attr, current_model_path, current_explanation_level, current_api_key
+    global current_llm, current_attr, current_model_path, current_explanation_level, current_api_key, current_top_k

     try:
         # Check if we need to reinitialize the model
@@ -95,7 +133,7 @@ def initialize_model_and_attr():

         # Check if we need to update attribution
         need_attr_update = (current_attr is None or
-                            current_explanation_level != DEFAULT_EXPLANATION_LEVEL or
+                            current_explanation_level != (current_explanation_level or DEFAULT_EXPLANATION_LEVEL) or
                             need_model_update)

         if need_model_update:
@@ -106,15 +144,20 @@ def initialize_model_and_attr():
             current_api_key = effective_api_key

         if need_attr_update:
-            print(f"Initializing context traceback with explanation level: {DEFAULT_EXPLANATION_LEVEL}")
+            # Use current configuration or defaults
+            explanation_level = current_explanation_level or DEFAULT_EXPLANATION_LEVEL
+            top_k = current_top_k or 3
+            if "segment" in DEFAULT_EXPLANATION_LEVEL:
+                DEFAULT_EXPLANATION_LEVEL = "segment"
+            print(f"Initializing context traceback with explanation level: {explanation_level}, top_k: {top_k}")
             current_attr = AttnTraceAttribution(
                 current_llm,
-                explanation_level=DEFAULT_EXPLANATION_LEVEL,
-                K=3,
+                explanation_level= DEFAULT_EXPLANATION_LEVEL,
+                K=DEFAULT_TOP_K,
                 q=0.4,
                 B=30
             )
-            current_explanation_level = DEFAULT_EXPLANATION_LEVEL
+            current_explanation_level = explanation_level

         return current_llm, current_attr, None

@@ -207,9 +250,9 @@ def generate_model_response(state: State):
         print("❌ Validation failed: No query")
         return state, gr.update(value=[("❌ Please enter a query before generating response! If you just changed configuration, try reloading an example.", None)], visible=True)

-    # Initialize model and attribution with default configuration
-    print(f"🔧 Generating response with explanation_level: {DEFAULT_EXPLANATION_LEVEL}")
-    #llm, attr, error_msg = initialize_model_and_attr()
+    # Initialize model and attribution with current configuration
+    print(f"🔧 Generating response with explanation_level: {current_explanation_level or DEFAULT_EXPLANATION_LEVEL}, top_k: {current_top_k or 3}")
+    llm, attr, error_msg = initialize_model_and_attr()

     if llm is None or attr is None:
         error_text = error_msg if error_msg else "Model initialization failed!"
@@ -338,7 +381,7 @@ def unified_response_handler(response_text: str, state: State):
     )

     # Initialize model and generate response
-    #llm, attr, error_msg = initialize_model_and_attr()
+    llm, attr, error_msg = initialize_model_and_attr()

     if llm is None:
         error_text = error_msg if error_msg else "Model initialization failed!"
@@ -436,7 +479,7 @@ def basic_get_scores_and_sources_full_response(state: State):
     state.explained_response_part = state.full_response

     # Attribution using default configuration
-    #_, attr, error_msg = initialize_model_and_attr()
+    llm, attr, error_msg = initialize_model_and_attr()

     if attr is None:
         error_text = error_msg if error_msg else "Traceback initialization failed!"
@@ -651,7 +694,7 @@ def basic_get_scores_and_sources(
     state.explained_response_part = selected_text

     # Attribution using default configuration
-    #_, attr, error_msg = initialize_model_and_attr()
+    llm, attr, error_msg = initialize_model_and_attr()

     if attr is None:
         error_text = error_msg if error_msg else "Traceback initialization failed!"
@@ -957,6 +1000,36 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
         '**Color Legend for Context Traceback (by ranking):** <span style="background-color: #FF4444; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Red</span> = 1st (most important) | <span style="background-color: #FF8C42; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Orange</span> = 2nd | <span style="background-color: #FFD93D; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Golden</span> = 3rd | <span style="background-color: #FFF280; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Yellow</span> = 4th-5th | <span style="background-color: #FFF9C4; color: black; padding: 2px 6px; border-radius: 4px; font-weight: 600;">Light</span> = 6th+'
     )

+    # Configuration bar
+    with gr.Row():
+        with gr.Column(scale=1):
+            explanation_level_dropdown = gr.Dropdown(
+                choices=["sentence", "paragraph", "text segment"],
+                value="sentence",
+                label="Explanation Level",
+                info="How to segment the context for traceback analysis"
+            )
+        with gr.Column(scale=1):
+            top_k_dropdown = gr.Dropdown(
+                choices=["3", "5", "10"],
+                value="5",
+                label="Top-K Value",
+                info="Number of most important text segments to highlight"
+            )
+        with gr.Column(scale=1):
+            apply_config_button = gr.Button(
+                "Apply Configuration",
+                variant="secondary",
+                size="sm"
+            )
+        with gr.Column(scale=2):
+            config_status_text = gr.Textbox(
+                label="Configuration Status",
+                value="Ready to apply configuration",
+                interactive=False,
+                lines=1
+            )
+

     # Top section: Wide Context box with tabs
     with gr.Row():
@@ -1209,6 +1282,12 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
         outputs=[state, response_input_box, basic_response_box, basic_generate_error_box]
     )

+    # Configuration update handler
+    apply_config_button.click(
+        fn=update_configuration,
+        inputs=[explanation_level_dropdown, top_k_dropdown],
+        outputs=[config_status_text]
+    )

     # gr.Markdown(
     #     "Please do not interact with elements while generation/attribution is in progress. This may cause errors. You can refresh the page if you run into issues because of this."
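For reference, the configuration bar kept in app_config.py follows the standard Gradio wiring: components are declared inside a gr.Blocks context and connected to a callback with Button.click, and the callback updates a read-only status textbox via gr.update. A minimal, runnable sketch of that wiring, assuming a simplified callback in place of the app's update_configuration():

    import gradio as gr

    def update_configuration(explanation_level, top_k):
        # Illustrative stand-in for the app's update_configuration(): just echo the chosen settings.
        return gr.update(value=f"✅ Configuration updated: {explanation_level} level, top-{int(top_k)}")

    with gr.Blocks() as demo:
        with gr.Row():
            explanation_level_dropdown = gr.Dropdown(
                choices=["sentence", "paragraph", "text segment"], value="sentence",
                label="Explanation Level")
            top_k_dropdown = gr.Dropdown(choices=["3", "5", "10"], value="5", label="Top-K Value")
            apply_config_button = gr.Button("Apply Configuration", variant="secondary", size="sm")
            config_status_text = gr.Textbox(label="Configuration Status",
                                            value="Ready to apply configuration", interactive=False)
        # Clicking the button runs the callback with the dropdown values and writes its
        # return value into the status textbox.
        apply_config_button.click(fn=update_configuration,
                                  inputs=[explanation_level_dropdown, top_k_dropdown],
                                  outputs=[config_status_text])

    if __name__ == "__main__":
        demo.launch()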