Spaces: Running on Zero
Commit · 284a5ca · 1 Parent(s): c28f525
update4
app.py CHANGED
@@ -25,6 +25,8 @@ nltk.download("punkt", download_dir="/home/user/nltk_data")
 nltk.data.path.append("/home/user/nltk_data")
 from nltk.tokenize import sent_tokenize
 
+DEFAULT_TOP_K = 3
+
 # Load original app constants
 APP_TITLE = '<div class="app-title"><span class="brand">AttnTrace: </span><span class="subtitle">Attention-based Context Traceback for Long-Context LLMs</span></div>'
 APP_DESCRIPTION = """AttnTrace traces a model's generated statements back to specific parts of the context using attention-based traceback. Try it out with Meta-Llama-3.1-8B-Instruct here! See the [[paper](https://arxiv.org/abs/2506.04202)] and [[code](https://github.com/Wang-Yanting/TracLLM-Kit)] for more!
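The punkt setup in these context lines is what sentence-level traceback rests on, and the new DEFAULT_TOP_K = 3 constant becomes the module-level default for the K parameter passed to AttnTraceAttribution further down. A minimal, self-contained sketch of the tokenizer flow (the download directory is the Space's; any writable path works elsewhere):

import nltk

nltk.download("punkt", download_dir="/home/user/nltk_data")
nltk.data.path.append("/home/user/nltk_data")

from nltk.tokenize import sent_tokenize

# punkt splits generated text into the sentence units that traceback scores
print(sent_tokenize("AttnTrace traces statements back to context. Each sentence is one unit."))
# -> ['AttnTrace traces statements back to context.', 'Each sentence is one unit.']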
@@ -86,7 +88,7 @@ current_top_k = 3 # Add top-k tracking
 
 def update_configuration(explanation_level, top_k):
     """Update the global configuration and reinitialize attribution if needed"""
-    global current_explanation_level, current_top_k, current_attr
+    global current_explanation_level, current_top_k, current_attr, current_llm
 
     # Convert top_k to int
     top_k = int(top_k)
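Adding current_llm to the global statement is load-bearing: without it, the current_llm = None assignment introduced later in this function would bind a function-local name and the loaded model would never actually be cleared. A minimal sketch of the difference:

current_llm = "loaded-model"

def reset_without_global():
    current_llm = None  # binds a local; the module-level handle survives

def reset_with_global():
    global current_llm
    current_llm = None  # clears the module-level handle

reset_without_global()
print(current_llm)  # loaded-model
reset_with_global()
print(current_llm)  # None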
@@ -99,11 +101,23 @@ def update_configuration(explanation_level, top_k):
         print(f"🔄 Updating configuration: explanation_level={explanation_level}, top_k={top_k}")
         current_explanation_level = explanation_level
         current_top_k = top_k
+
+        DEFAULT_EXPLANATION_LEVEL = explanation_level
+        DEFAULT_TOP_K = top_k
 
-        # Reset attribution to force reinitialization
+        # Reset both model and attribution to force complete reinitialization
+        current_llm = None
         current_attr = None
 
-
+        # Reinitialize with new configuration
+        try:
+            llm, attr, error_msg = initialize_model_and_attr()
+            if llm is not None and attr is not None:
+                return gr.update(value=f"✅ Configuration updated: {explanation_level} level, top-{top_k}")
+            else:
+                return gr.update(value=f"❌ Error reinitializing: {error_msg}")
+        except Exception as e:
+            return gr.update(value=f"❌ Error updating configuration: {str(e)}")
     else:
         return gr.update(value="ℹ️ Configuration unchanged")
 
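One caveat in the hunk above: DEFAULT_EXPLANATION_LEVEL and DEFAULT_TOP_K do not appear in the global statement, so those two assignments bind function locals rather than the module-level defaults. The handler is presumably wired to the configuration controls roughly as below; the component names and event choices are assumptions, since the diff shows only the handler itself:

import gradio as gr

with gr.Blocks() as demo:
    level = gr.Radio(["sentence", "segment"], value="sentence", label="Explanation level")
    top_k = gr.Slider(1, 10, value=3, step=1, label="Top-k")
    status = gr.Textbox(label="Status", interactive=False)

    # Either control funnels into update_configuration; the returned
    # gr.update(...) value lands in the status box.
    level.change(update_configuration, inputs=[level, top_k], outputs=status)
    top_k.release(update_configuration, inputs=[level, top_k], outputs=status)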
@@ -133,12 +147,13 @@ def initialize_model_and_attr():
     # Use current configuration or defaults
     explanation_level = current_explanation_level or DEFAULT_EXPLANATION_LEVEL
     top_k = current_top_k or 3
-
+    if "segment" in DEFAULT_EXPLANATION_LEVEL:
+        DEFAULT_EXPLANATION_LEVEL = "segment"
     print(f"Initializing context traceback with explanation level: {explanation_level}, top_k: {top_k}")
     current_attr = AttnTraceAttribution(
         current_llm,
-        explanation_level=explanation_level,
-        K=top_k,
+        explanation_level= DEFAULT_EXPLANATION_LEVEL,
+        K=DEFAULT_TOP_K,
         q=0.4,
         B=30
     )
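initialize_model_and_attr is only partially visible here; from this hunk and the call sites below, its contract is to lazily rebuild whatever was reset to None and to return an (llm, attr, error_msg) triple instead of raising. A sketch of that contract under those assumptions (load_model is a hypothetical stand-in; the real loader is not in this diff):

def initialize_model_and_attr():
    global current_llm, current_attr
    try:
        if current_llm is None:
            current_llm = load_model()  # hypothetical stand-in for the actual model loading
        if current_attr is None:
            current_attr = AttnTraceAttribution(
                current_llm,
                explanation_level=current_explanation_level or DEFAULT_EXPLANATION_LEVEL,
                K=current_top_k or DEFAULT_TOP_K,
                q=0.4,
                B=30,
            )
        return current_llm, current_attr, None
    except Exception as e:
        return None, None, str(e)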
@@ -235,9 +250,9 @@ def generate_model_response(state: State):
         print("❌ Validation failed: No query")
         return state, gr.update(value=[("❌ Please enter a query before generating response! If you just changed configuration, try reloading an example.", None)], visible=True)
 
-    # Initialize model and attribution with default configuration
-    print(f"🔧 Generating response with explanation_level: {DEFAULT_EXPLANATION_LEVEL}")
-
+    # Initialize model and attribution with current configuration
+    print(f"🔧 Generating response with explanation_level: {current_explanation_level or DEFAULT_EXPLANATION_LEVEL}, top_k: {current_top_k or 3}")
+    llm, attr, error_msg = initialize_model_and_attr()
 
     if llm is None or attr is None:
         error_text = error_msg if error_msg else "Model initialization failed!"
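The same one-line change repeats in this hunk and the three below it: each entry point re-runs initialize_model_and_attr(), so a configuration change (which nulls current_llm and current_attr) triggers a rebuild on the next request, and each surfaces error_msg in the UI rather than raising. Following the validation-failure return above, the failure path plausibly looks like:

llm, attr, error_msg = initialize_model_and_attr()
if llm is None or attr is None:
    error_text = error_msg if error_msg else "Model initialization failed!"
    # degrade to an error banner in the chat widget instead of crashing
    return state, gr.update(value=[(f"❌ {error_text}", None)], visible=True)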
@@ -366,7 +381,7 @@ def unified_response_handler(response_text: str, state: State):
     )
 
     # Initialize model and generate response
-
+    llm, attr, error_msg = initialize_model_and_attr()
 
     if llm is None:
         error_text = error_msg if error_msg else "Model initialization failed!"
@@ -464,7 +479,7 @@ def basic_get_scores_and_sources_full_response(state: State):
     state.explained_response_part = state.full_response
 
     # Attribution using default configuration
-
+    llm, attr, error_msg = initialize_model_and_attr()
 
     if attr is None:
         error_text = error_msg if error_msg else "Traceback initialization failed!"
@@ -679,7 +694,7 @@ def basic_get_scores_and_sources(
     state.explained_response_part = selected_text
 
     # Attribution using default configuration
-
+    llm, attr, error_msg = initialize_model_and_attr()
 
     if attr is None:
         error_text = error_msg if error_msg else "Traceback initialization failed!"