matveymih commited on
Commit
50f6a03
·
verified ·
1 Parent(s): 6c7eb2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -1
app.py CHANGED
@@ -60,7 +60,7 @@ def update_description(task_name: str) -> str:
60
 
61
  with gr.Blocks() as demo:
62
  gr.Markdown("# 🔬 LLM-Microscope — Understanding Token Representations in Transformers")
63
- gr.Markdown("Select a model, a mode of analysis, and a sentence. The tool will visualize what's happening **inside** the language model — layer by layer, token by token.")
64
 
65
  with gr.Row():
66
  model_selector = gr.Dropdown(
@@ -114,6 +114,12 @@ This heatmap shows **how each token is processed** across layers of a language m
114
  - `Tokenwise loss without i-th layer`: shows how much each token depends on a specific layer. Red means performance drops if we skip this layer.
115
 
116
  Use this tool to **peek inside the black box** — it reveals which layers matter most, which tokens carry the most memory, and how LLMs evolve their predictions.
 
 
 
 
 
 
117
  """)
118
 
119
  task_selector.change(fn=update_description, inputs=[task_selector], outputs=[task_description])
 
60
 
61
  with gr.Blocks() as demo:
62
  gr.Markdown("# 🔬 LLM-Microscope — Understanding Token Representations in Transformers")
63
+ gr.Markdown("Select a model, analysis mode, and input — then peek inside the black box to see which layers matter most, which tokens carry the most memory, and how predictions evolve.")
64
 
65
  with gr.Row():
66
  model_selector = gr.Dropdown(
 
114
  - `Tokenwise loss without i-th layer`: shows how much each token depends on a specific layer. Red means performance drops if we skip this layer.
115
 
116
  Use this tool to **peek inside the black box** — it reveals which layers matter most, which tokens carry the most memory, and how LLMs evolve their predictions.
117
+
118
+ You can also use `llm-microscope` as a Python library to run these analyses on **your own models and data**.
119
+
120
+ Just install it with: `pip install llm-microscope`
121
+
122
+ More details provided in [GitHub repo](https://github.com/AIRI-Institute/LLM-Microscope).
123
  """)
124
 
125
  task_selector.change(fn=update_description, inputs=[task_selector], outputs=[task_description])