Adds Tool Use Examples

#2
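The new tab feeds tool-calling conversations through `tokenizer.apply_chat_template`. As a standalone illustration (not part of `app.py`; the message content is invented, and it assumes the gpt-oss chat template accepts OpenAI-style `tool_calls` and `tool` messages, which is the shape the new examples use):

```python
import json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")

# A tiny tool-calling exchange: the assistant requests a function call,
# then a "tool" message carries the (mock) result back.
messages = [
    {"role": "user", "content": "What's the weather like in Tokyo today?"},
    {
        "role": "assistant",
        "content": "",
        "tool_calls": [{
            "id": "call_1",
            "type": "function",
            "function": {
                "name": "get_weather",
                "arguments": json.dumps({"location": "Tokyo, Japan"}),
            },
        }],
    },
    {"role": "tool", "content": json.dumps({"forecast": "sunny, 25°C"}), "tool_call_id": "call_1"},
]

token_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=False)
print(len(token_ids))               # the token count the UI reports
print(tokenizer.decode(token_ids))  # the rendered conversation, special tokens included
```

The app does the same thing token by token (decoding each id individually) so it can colour them in a `HighlightedText` component.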
Files changed (1)
  1. app.py +371 -35
app.py CHANGED
@@ -1,8 +1,123 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer
3
 
4
  tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
5
 
6
 
7
  def tokenize_dialogue(dialogue_data):
8
  """
@@ -44,6 +159,161 @@ def tokenize_dialogue(dialogue_data):
44
 
45
  return gr.HighlightedText(value=decoded_text, color_map=color_map), len(token_ids)
46
 
47
  def create_sample_dialogue():
48
  """
49
  Create a sample dialogue for demonstration
@@ -57,57 +327,102 @@ def create_sample_dialogue():
57
 
58
  with gr.Blocks(title="GPT-OSS Tokenizer Explorer") as demo:
59
  gr.Markdown("# GPT-OSS Tokenizer Explorer")
60
- gr.Markdown("Enter a dialogue and see how the GPT-OSS tokenizer processes it. Use the format `speaker: message` in the dialogue component.")
61
 
62
- with gr.Row():
63
- with gr.Column(scale=1):
64
- gr.Markdown("### Input Dialogue")
65
-
66
- dialogue_input = gr.Dialogue(
67
- speakers=["system", "user", "assistant"],
68
- label="Enter your dialogue",
69
- placeholder="Type 'system:', 'user:', or 'assistant:' followed by your message",
70
- show_submit_button=True,
71
- show_copy_button=True,
72
- type="dialogue",
73
- ui_mode="dialogue-only",
74
- )
75
 
76
  with gr.Row():
77
- sample_btn = gr.Button("Load Sample", variant="secondary")
78
- clear_btn = gr.Button("Clear", variant="secondary")
79
 
80
- with gr.Column(scale=1):
81
- gr.Markdown("### Tokenization Results")
82
-
83
- highlighted_output = gr.HighlightedText(
84
- label="Tokenized Output",
85
- show_inline_category=False
86
- )
87
 
88
- token_count = gr.Label(
89
- value="Total Tokens: 0",
90
- label="Token Count"
91
- )
92
 
93
  with gr.Accordion("How to use", open=False):
94
  gr.Markdown("""
95
- ### Instructions:
96
  1. **Enter dialogue**: Use the dialogue component to enter conversations
97
  2. **Speaker format**: Type `system:`, `user:`, or `assistant:` followed by your message
98
  3. **Submit**: Click 'Tokenize Dialogue' to process the conversation
99
  4. **View results**: See the tokenization details in the output area
100
 
101
- ### Example:
102
- ```
103
- system: You are a helpful assistant.
104
- user: Hello! How are you today?
105
- assistant: I'm doing well, thank you for asking!
106
- ```
107
 
108
  ### What you'll see:
109
  - **Total tokens**: Number of tokens in the conversation
110
- - **Tokenized output**: How the tokenizer formats the conversation
111
  """)
112
 
113
  def process_dialogue(dialogue):
@@ -121,6 +436,20 @@ with gr.Blocks(title="GPT-OSS Tokenizer Explorer") as demo:
121
  def clear_dialogue():
122
  return None, [], "Total Tokens: 0"
123
 
124
  sample_btn.click(
125
  fn=create_sample_dialogue,
126
  outputs=[dialogue_input]
@@ -136,6 +465,13 @@ with gr.Blocks(title="GPT-OSS Tokenizer Explorer") as demo:
136
  inputs=[dialogue_input],
137
  outputs=[highlighted_output, token_count]
138
  )
139
 
140
  if __name__ == "__main__":
141
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer
3
+ import json
4
+ import random
5
+ import math
6
 
7
  tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
8
 
9
+ # Mock tool functions
10
+ def weather_tool(location, days=1):
11
+ """Get weather forecast for a location"""
12
+ weather_conditions = ["sunny", "cloudy", "rainy", "snowy", "partly cloudy"]
13
+ temps = random.randint(15, 35)
14
+ condition = random.choice(weather_conditions)
15
+ return {
16
+ "location": location,
17
+ "days": days,
18
+ "forecast": f"{condition}, {temps}°C"
19
+ }
20
+
21
+ def calculator_tool(operation, a, b=None):
22
+ """Perform mathematical calculations"""
23
+ if operation == "add":
24
+ return a + b
25
+ elif operation == "subtract":
26
+ return a - b
27
+ elif operation == "multiply":
28
+ return a * b
29
+ elif operation == "divide":
30
+ return a / b if b != 0 else "Error: Division by zero"
31
+ elif operation == "sqrt":
32
+ return math.sqrt(a)
33
+ elif operation == "power":
34
+ return a ** b
35
+ else:
36
+ return "Error: Unknown operation"
37
+
38
+ def search_tool(query, num_results=3):
39
+ """Mock web search results"""
40
+ mock_results = [
41
+ {"title": f"Result about {query} - Article 1", "url": f"https://example.com/{query.replace(' ', '-')}-1", "snippet": f"This is a comprehensive guide about {query} with detailed information..."},
42
+ {"title": f"{query} - Wikipedia", "url": f"https://en.wikipedia.org/wiki/{query.replace(' ', '_')}", "snippet": f"{query} is an important topic that covers various aspects..."},
43
+ {"title": f"Latest news on {query}", "url": f"https://news.example.com/{query.replace(' ', '-')}", "snippet": f"Recent developments and updates related to {query}..."},
44
+ ]
45
+ return mock_results[:num_results]
46
+
47
+ def code_executor_tool(code):
48
+ """Execute simple Python code (safe expressions only)"""
49
+ try:
50
+ # Only allow simple mathematical expressions for safety
51
+ allowed_names = {"abs": abs, "max": max, "min": min, "sum": sum, "len": len, "range": range}
52
+ result = eval(code, {"__builtins__": {}}, allowed_names)
53
+ return f"Result: {result}"
54
+ except Exception as e:
55
+ return f"Error: {str(e)}"
56
+
57
+ # Tool definitions for function calling
58
+ AVAILABLE_TOOLS = [
59
+ {
60
+ "type": "function",
61
+ "function": {
62
+ "name": "get_weather",
63
+ "description": "Get weather forecast for a specific location",
64
+ "parameters": {
65
+ "type": "object",
66
+ "properties": {
67
+ "location": {"type": "string", "description": "The city and state/country"},
68
+ "days": {"type": "integer", "description": "Number of days for forecast (1-7)", "default": 1}
69
+ },
70
+ "required": ["location"]
71
+ }
72
+ }
73
+ },
74
+ {
75
+ "type": "function",
76
+ "function": {
77
+ "name": "calculate",
78
+ "description": "Perform mathematical calculations",
79
+ "parameters": {
80
+ "type": "object",
81
+ "properties": {
82
+ "operation": {"type": "string", "enum": ["add", "subtract", "multiply", "divide", "sqrt", "power"]},
83
+ "a": {"type": "number", "description": "First number"},
84
+ "b": {"type": "number", "description": "Second number (not needed for sqrt)"}
85
+ },
86
+ "required": ["operation", "a"]
87
+ }
88
+ }
89
+ },
90
+ {
91
+ "type": "function",
92
+ "function": {
93
+ "name": "web_search",
94
+ "description": "Search the web for information",
95
+ "parameters": {
96
+ "type": "object",
97
+ "properties": {
98
+ "query": {"type": "string", "description": "Search query"},
99
+ "num_results": {"type": "integer", "description": "Number of results (1-10)", "default": 3}
100
+ },
101
+ "required": ["query"]
102
+ }
103
+ }
104
+ },
105
+ {
106
+ "type": "function",
107
+ "function": {
108
+ "name": "execute_code",
109
+ "description": "Execute simple Python code expressions",
110
+ "parameters": {
111
+ "type": "object",
112
+ "properties": {
113
+ "code": {"type": "string", "description": "Python code expression to execute"}
114
+ },
115
+ "required": ["code"]
116
+ }
117
+ }
118
+ }
119
+ ]
120
+
121
 
122
  def tokenize_dialogue(dialogue_data):
123
  """
 
159
 
160
  return gr.HighlightedText(value=decoded_text, color_map=color_map), len(token_ids)
161
 
162
+ def tokenize_tool_conversation(messages_with_tools):
163
+ """
164
+ Tokenize a conversation that includes tool calls and responses
165
+ """
166
+ if tokenizer is None:
167
+ raise ValueError("Tokenizer not loaded. Please check your installation.")
168
+
169
+ # Preprocess messages to handle None content
170
+ processed_messages = []
171
+ for message in messages_with_tools:
172
+ processed_message = message.copy()
173
+ if processed_message.get("content") is None:
174
+ processed_message["content"] = ""
175
+ processed_messages.append(processed_message)
176
+
177
+ formatted_input = tokenizer.apply_chat_template(
178
+ processed_messages,
179
+ add_generation_prompt=False,
180
+ return_tensors="np"
181
+ )
182
+
183
+ token_ids = formatted_input[0].tolist()
184
+ decoded_text = []
185
+ colors = ["#FF6B6B", "#4ECDC4", "#45B7D1", "#96CEB4", "#FFEAA7", "#DDA0DD", "#98FB98", "#F0E68C"]
186
+ color_map = {}
187
+
188
+ for i, token_id in enumerate(token_ids):
189
+ color = colors[i % len(colors)]
190
+ if str(token_id) not in color_map:
191
+ color_map[str(token_id)] = color
192
+ decoded_text.append((tokenizer.decode([token_id]), str(token_id)))
193
+
194
+ return gr.HighlightedText(value=decoded_text, color_map=color_map), len(token_ids)
195
+
196
+ def execute_tool_call(tool_name, arguments):
197
+ """Execute a tool call and return the result"""
198
+ try:
199
+ if tool_name == "get_weather":
200
+ return weather_tool(**arguments)
201
+ elif tool_name == "calculate":
202
+ return calculator_tool(**arguments)
203
+ elif tool_name == "web_search":
204
+ return search_tool(**arguments)
205
+ elif tool_name == "execute_code":
206
+ return code_executor_tool(**arguments)
207
+ else:
208
+ return {"error": f"Unknown tool: {tool_name}"}
209
+ except Exception as e:
210
+ return {"error": str(e)}
211
+
212
+ def create_tool_conversation_examples():
213
+ """Create example conversations with tool use"""
214
+ examples = {
215
+ "Weather Query": [
216
+ {"role": "system", "content": "You are a helpful assistant with access to weather information."},
217
+ {"role": "user", "content": "What's the weather like in Tokyo today?"},
218
+ {
219
+ "role": "assistant",
220
+ "content": "",
221
+ "tool_calls": [
222
+ {
223
+ "id": "call_1",
224
+ "type": "function",
225
+ "function": {
226
+ "name": "get_weather",
227
+ "arguments": json.dumps({"location": "Tokyo, Japan", "days": 1})
228
+ }
229
+ }
230
+ ]
231
+ },
232
+ {
233
+ "role": "tool",
234
+ "content": json.dumps(weather_tool("Tokyo, Japan", 1)),
235
+ "tool_call_id": "call_1"
236
+ },
237
+ {"role": "assistant", "content": "The weather in Tokyo today is sunny with a temperature of 25°C. It looks like a great day to be outside!"}
238
+ ],
239
+
240
+ "Math Calculation": [
241
+ {"role": "system", "content": "You are a helpful assistant that can perform calculations."},
242
+ {"role": "user", "content": "What's a 15% tip on an $87.50 bill?"},
243
+ {
244
+ "role": "assistant",
245
+ "content": "",
246
+ "tool_calls": [
247
+ {
248
+ "id": "call_2",
249
+ "type": "function",
250
+ "function": {
251
+ "name": "calculate",
252
+ "arguments": json.dumps({"operation": "multiply", "a": 87.50, "b": 0.15})
253
+ }
254
+ }
255
+ ]
256
+ },
257
+ {
258
+ "role": "tool",
259
+ "content": json.dumps({"result": calculator_tool("multiply", 87.50, 0.15)}),
260
+ "tool_call_id": "call_2"
261
+ },
262
+ {"role": "assistant", "content": "A 15% tip on an $87.50 bill would be $13.13. So your total would be $100.63."}
263
+ ],
264
+
265
+ "Web Search": [
266
+ {"role": "system", "content": "You are a helpful assistant that can search for information."},
267
+ {"role": "user", "content": "Find me information about machine learning trends in 2024"},
268
+ {
269
+ "role": "assistant",
270
+ "content": "",
271
+ "tool_calls": [
272
+ {
273
+ "id": "call_3",
274
+ "type": "function",
275
+ "function": {
276
+ "name": "web_search",
277
+ "arguments": json.dumps({"query": "machine learning trends 2024", "num_results": 3})
278
+ }
279
+ }
280
+ ]
281
+ },
282
+ {
283
+ "role": "tool",
284
+ "content": json.dumps(search_tool("machine learning trends 2024", 3)),
285
+ "tool_call_id": "call_3"
286
+ },
287
+ {"role": "assistant", "content": "I found several resources about machine learning trends in 2024. Based on the search results, key trends include advances in large language models, improved efficiency in AI training, and greater focus on responsible AI development."}
288
+ ],
289
+
290
+ "Code Execution": [
291
+ {"role": "system", "content": "You are a helpful assistant that can execute Python code."},
292
+ {"role": "user", "content": "Calculate the sum of numbers from 1 to 100"},
293
+ {
294
+ "role": "assistant",
295
+ "content": "",
296
+ "tool_calls": [
297
+ {
298
+ "id": "call_4",
299
+ "type": "function",
300
+ "function": {
301
+ "name": "execute_code",
302
+ "arguments": json.dumps({"code": "sum(range(1, 101))"})
303
+ }
304
+ }
305
+ ]
306
+ },
307
+ {
308
+ "role": "tool",
309
+ "content": json.dumps({"result": code_executor_tool("sum(range(1, 101))")}),
310
+ "tool_call_id": "call_4"
311
+ },
312
+ {"role": "assistant", "content": "The sum of numbers from 1 to 100 is 5,050."}
313
+ ]
314
+ }
315
+ return examples
316
+
317
  def create_sample_dialogue():
318
  """
319
  Create a sample dialogue for demonstration
 
327
 
328
  with gr.Blocks(title="GPT-OSS Tokenizer Explorer") as demo:
329
  gr.Markdown("# GPT-OSS Tokenizer Explorer")
330
+ gr.Markdown("Explore how the GPT-OSS tokenizer processes regular conversations and tool-calling scenarios.")
331
 
332
+ with gr.Tabs():
333
+ with gr.TabItem("Regular Dialogue"):
334
+ gr.Markdown("Enter a dialogue and see how the GPT-OSS tokenizer processes it.")
335
 
336
  with gr.Row():
337
+ with gr.Column(scale=1):
338
+ gr.Markdown("### Input Dialogue")
339
+
340
+ dialogue_input = gr.Dialogue(
341
+ speakers=["system", "user", "assistant"],
342
+ label="Enter your dialogue",
343
+ placeholder="Type 'system:', 'user:', or 'assistant:' followed by your message",
344
+ show_submit_button=True,
345
+ show_copy_button=True,
346
+ type="dialogue",
347
+ ui_mode="dialogue-only",
348
+ )
349
+
350
+ with gr.Row():
351
+ sample_btn = gr.Button("Load Sample", variant="secondary")
352
+ clear_btn = gr.Button("Clear", variant="secondary")
353
+
354
+ with gr.Column(scale=1):
355
+ gr.Markdown("### Tokenization Results")
356
+
357
+ highlighted_output = gr.HighlightedText(
358
+ label="Tokenized Output",
359
+ show_inline_category=False
360
+ )
361
+
362
+ token_count = gr.Label(
363
+ value="Total Tokens: 0",
364
+ label="Token Count"
365
+ )
366
 
367
+ with gr.TabItem("Tool Use Examples"):
368
+ gr.Markdown("See how the GPT-OSS tokenizer handles function calling and tool use conversations.")
369
 
370
+ with gr.Row():
371
+ with gr.Column(scale=1):
372
+ gr.Markdown("### Tool Use Scenarios")
373
+
374
+ example_dropdown = gr.Dropdown(
375
+ choices=["Weather Query", "Math Calculation", "Web Search", "Code Execution"],
376
+ value="Weather Query",
377
+ label="Select Example Scenario"
378
+ )
379
+
380
+ load_example_btn = gr.Button("Load Example", variant="primary")
381
+
382
+ gr.Markdown("### Available Tools")
383
+ tools_display = gr.JSON(
384
+ value=AVAILABLE_TOOLS,
385
+ label="Tool Definitions"
386
+ )
387
+
388
+ with gr.Column(scale=1):
389
+ gr.Markdown("### Tool Conversation Tokenization")
390
+
391
+ tool_highlighted_output = gr.HighlightedText(
392
+ label="Tokenized Tool Conversation",
393
+ show_inline_category=False
394
+ )
395
+
396
+ tool_token_count = gr.Label(
397
+ value="Total Tokens: 0",
398
+ label="Token Count"
399
+ )
400
+
401
+ gr.Markdown("### Conversation Preview")
402
+ conversation_display = gr.JSON(
403
+ label="Conversation Structure",
404
+ value=[]
405
+ )
406
 
407
  with gr.Accordion("How to use", open=False):
408
  gr.Markdown("""
409
+ ### Regular Dialogue Tab:
410
  1. **Enter dialogue**: Use the dialogue component to enter conversations
411
  2. **Speaker format**: Type `system:`, `user:`, or `assistant:` followed by your message
412
  3. **Submit**: Click 'Tokenize Dialogue' to process the conversation
413
  4. **View results**: See the tokenization details in the output area
414
 
415
+ ### Tool Use Examples Tab:
416
+ 1. **Select scenario**: Choose from weather query, math calculation, web search, or code execution
417
+ 2. **Load example**: Click 'Load Example' to see a tool-calling conversation
418
+ 3. **Compare tokenization**: See how tool calls differ from regular messages
419
+ 4. **Explore tools**: View available tool definitions and their parameters
 
420
 
421
  ### What you'll see:
422
  - **Total tokens**: Number of tokens in the conversation
423
+ - **Tokenized output**: How the tokenizer formats conversations and tool calls
424
+ - **Tool definitions**: JSON schema for available functions
425
+ - **Conversation structure**: The complete message flow including tool calls and responses
426
  """)
427
 
428
  def process_dialogue(dialogue):
 
436
  def clear_dialogue():
437
  return None, [], "Total Tokens: 0"
438
 
439
+ def load_tool_example(example_name):
440
+ """Load a tool use example and tokenize it"""
441
+ examples = create_tool_conversation_examples()
442
+ if example_name not in examples:
443
+ return gr.HighlightedText(value=[]), "Total Tokens: 0", []
444
+
445
+ conversation = examples[example_name]
446
+ try:
447
+ result_text, token_count_val = tokenize_tool_conversation(conversation)
448
+ return result_text, f"Total Tokens: {token_count_val}", conversation
449
+ except Exception as e:
450
+ error_msg = f"Error tokenizing conversation: {str(e)}"
451
+ return gr.HighlightedText(value=[(error_msg, "error")]), "Total Tokens: 0", conversation
452
+
453
  sample_btn.click(
454
  fn=create_sample_dialogue,
455
  outputs=[dialogue_input]
 
465
  inputs=[dialogue_input],
466
  outputs=[highlighted_output, token_count]
467
  )
468
+
469
+ # Tool use event handlers
470
+ load_example_btn.click(
471
+ fn=load_tool_example,
472
+ inputs=[example_dropdown],
473
+ outputs=[tool_highlighted_output, tool_token_count, conversation_display]
474
+ )
475
 
476
  if __name__ == "__main__":
477
  demo.launch()
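For reference, `apply_chat_template` in transformers can also receive the function schemas themselves through its `tools=` argument, so the definitions in `AVAILABLE_TOOLS` can be rendered into the prompt and counted as well. A minimal sketch, reusing `tokenizer` and `AVAILABLE_TOOLS` from `app.py` and assuming the gpt-oss chat template honours the `tools` argument:

```python
# Sketch: render the tool schemas into the prompt and count their tokens.
messages = [
    {"role": "system", "content": "You are a helpful assistant with access to weather information."},
    {"role": "user", "content": "What's the weather like in Tokyo today?"},
]
token_ids = tokenizer.apply_chat_template(
    messages,
    tools=AVAILABLE_TOOLS,       # the function-calling schemas defined in app.py
    add_generation_prompt=True,  # end with the assistant header, as for generation
)
print(f"Prompt including tool definitions: {len(token_ids)} tokens")
```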