nehcgs commited on
Commit
33c4f8a
·
verified ·
1 Parent(s): 724436e

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/Plano-Orchestrator.png filter=lfs diff=lfs merge=lfs -text
37
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT
2
+ **Version Release Date:** April 11th, 2025
3
+
4
+ This Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT is based on the Llama 3.2 Community License, Copyright © Meta Platforms, Inc. The terms and conditions have been adapted to reflect the proprietary nature of Katanemo Labs' materials.
5
+
6
+ ---
7
+
8
+ 1. Definitions
9
+ a. "Agreement": The terms and conditions for use, reproduction, distribution, and modification of the Katanemo Materials set forth herein.
10
+ b. "Documentation": The specifications, manuals, and documentation accompanying Katanemo LLMs v1.
11
+ c. "Licensee" or "you": The individual or entity entering into this Agreement, including your employer if you are acting on their behalf.
12
+ d. "Katanemo": The foundational large language models and software provided by Katanemo Labs, Inc., available at https://huggingface.co/katanemolabs.
13
+ e. "Katanemo Materials": Collectively, Katanemo's proprietary models and Documentation. Some Materials are derived from the Qwen language models licensed under the Qwen RESEARCH LICENSE AGREEMENT.
14
+ f. "Katanemo Labs" or "we": Katanemo Labs Inc., a Delaware, USA Corporation.
15
+
16
+ ---
17
+
18
+ 2. Acceptance of Terms
19
+ By clicking "I Accept" or using any part of the Katanemo Materials, you agree to be bound by this Agreement.
20
+
21
+ ---
22
+
23
+ 3. LICENSE RIGHTS AND REDISTRIBUTION
24
+ a. Grant of Rights
25
+ Subject to the restrictions in Section 4, you are granted a non-exclusive, worldwide, non-transferable, and royalty-free license to:
26
+ - Use, reproduce, distribute, and modify the Katanemo Materials.
27
+ - Create derivative works based on the Katanemo Materials.
28
+
29
+ b. Permitted Redistribution
30
+ If you distribute the Katanemo Materials or any derivative work:
31
+ - You must include a copy of this License.
32
+ - You must prominently display the notice “Built with Katanemo” on a related website or documentation.
33
+
34
+ c. Attribution Requirement
35
+ You must include the following attribution notice in any distributed or public-facing use:
36
+ "Katanemo is licensed under the Katanemo Labs Community License.
37
+ Copyright © Katanemo Labs, Inc. All Rights Reserved."
38
+
39
+ d. Compliance Requirement
40
+ All use of the Katanemo Materials must comply with the Acceptable Use Policy, available at: https://katanemo.com/use-policy
41
+
42
+ ---
43
+
44
+ 4. COMMERCIAL USE AND DISTRIBUTION RESTRICTIONS
45
+
46
+ You may use, reproduce, modify, distribute, and create derivative works from the Katanemo Materials for any purpose, including commercial use, EXCEPT in the following cases:
47
+
48
+ You may NOT package, distribute, or make available the Katanemo Materials as part of:
49
+ - A framework,
50
+ - A proxy server,
51
+ - Middleware,
52
+ - A gateway infrastructure product,
53
+ - Or any product substantially similar in function or purpose to the above,
54
+
55
+ unless you obtain a separate commercial license from Katanemo Labs.
56
+
57
+ ---
58
+
59
+ This license does not grant trademark rights or rights outside the scope described above.
60
+
61
+ 5. Disclaimer of Warranty
62
+ The Katanemo Materials are provided "AS IS" without warranties of any kind, either express or implied, including but not limited to warranties of title, non-infringement, or fitness for a particular purpose.
63
+
64
+ ---
65
+
66
+ 6. Limitation of Liability
67
+ Katanemo Labs is not liable for any indirect, special, or consequential damages arising out of the use of the Katanemo Materials, even if advised of the possibility of such damages.
68
+
69
+ ---
70
+
71
+ 7. Intellectual Property
72
+ a. Trademarks
73
+ No trademark licenses are granted, except as required for attribution as described in Section 3.c. You may use the “Katanemo” mark according to Katanemo Labs' brand guidelines.
74
+
75
+ b. Ownership
76
+ You own any derivative works or modifications you create, except for portions owned by Katanemo Labs.
77
+
78
+ c. Litigation
79
+ If you file a lawsuit against Katanemo Labs regarding intellectual property, your license under this Agreement terminates.
80
+
81
+ ---
82
+
83
+ 8. Term and Termination
84
+ This Agreement continues until terminated. Katanemo Labs may terminate the Agreement if you breach any terms. Upon termination, you must cease using the Katanemo Materials.
85
+
86
+ ---
87
+
88
+ 9. Governing Law and Jurisdiction
89
+ This Agreement is governed by the laws of the State of Washington, USA. Any disputes will be resolved in the courts of California.
README.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ license_name: katanemo-research
4
+ license_link: >-
5
+ https://huggingface.co/katanemo/Plano-Orchestrator-30B-A3B/blob/main/LICENSE
6
+ base_model:
7
+ - Qwen/Qwen3-30B-A3B-Instruct-2507
8
+ language:
9
+ - en
10
+ pipeline_tag: text-generation
11
+ ---
12
+ # katanemo/Plano-Orchestrator-30B-A3B
13
+
14
+ ## Overview
15
+
16
+ **Plano-Orchestrator** is a family of state-of-the-art routing and orchestration models that decide which agent(s) or LLM(s) should handle each request, and in what sequence. Built for multi-agent orchestration systems, Plano-Orchestrator excels at analyzing user intent and conversation context to make precise routing and orchestration decisions. Designed for real-world deployments, it delivers strong performance across general conversations, coding tasks, and long-context multi-turn conversations, while remaining efficient enough for low-latency production environments.
17
+
18
+ #### Key capabilities
19
+ - **Multi-turn Context Understanding**: Makes routing decisions based on full conversation history, maintaining contextual awareness across extended dialogues with evolving user needs.
20
+ - **Multi-intent Detection**: Identifies when a single user message requires multiple agents simultaneously, enabling parallel/sequential routing to fulfill complex requests.
21
+ - **Context-dependent Routing**: Correctly interprets ambiguous or referential messages by leveraging prior conversation context for accurate routing decisions.
22
+ - **Conversational Flow Handling**: Understands diverse interaction patterns including follow-ups, clarifications, confirmations, and corrections within ongoing conversations.
23
+ - **Negative Case Detection**: Recognizes when no specialized routing is needed, avoiding unnecessary LLM or agent calls for casual conversation.
24
+
25
+ ## Benchmark
26
+
27
+ We evaluate on **1,958 user messages** across **605 multi-turn conversations** with more than **130 different agents**, covering three scenarios:
28
+
29
+ - **General** (1,438 messages): Everyday conversational queries spanning diverse topics and agent types
30
+ - **Coding** (285 messages): Development-focused conversations including debugging, code generation, and technical assistance
31
+ - **Long-context** (235 messages): Extended conversations requiring understanding of extensive prior context
32
+
33
+ Each message is annotated with routing-relevant attributes, including but not limited to intent multiplicity, context dependency, and continuation type. Below is the evaluation
34
+ result.
35
+
36
+ <div align="center">
37
+ <img width="100%" height="auto" src="./assets/Plano-Orchestrator.png">
38
+ </div>
39
+
40
+ > [!NOTE]
41
+ > For evaluation, please note that all models were evaluated with minimal reasoning to ensure routing remains efficient.
42
+
43
+ ## Example
44
+
45
+ ```python
46
+ import json
47
+ import torch
48
+
49
+ from transformers import AutoTokenizer, AutoModelForCausalLM
50
+
51
+
52
+ ORCHESTRATION_PROMPT = (
53
+ "You are a helpful assistant that selects the most suitable routes based on user intent.\n"
54
+ "You are provided with a list of available routes enclosed within <routes></routes> XML tags:\n"
55
+ "<routes>\n{routes}\n</routes>\n\n"
56
+ "You are also given the conversation context enclosed within <conversation></conversation> XML tags:\n"
57
+ "<conversation>\n{conversation}\n</conversation>\n\n"
58
+ "## Instructions\n"
59
+ "1. Analyze the latest user intent from the conversation.\n"
60
+ "2. Compare it against the available routes to find which routes can help fulfill the request.\n"
61
+ "3. Respond only with the exact route names from <routes>.\n"
62
+ "4. If no routes can help or the intent is already fulfilled, return an empty list.\n\n"
63
+ "## Response Format\n"
64
+ "Return your answer strictly in JSON as follows:\n"
65
+ '{{"route": ["route_name_1", "route_name_2", "..."]}}\n'
66
+ "If no routes are needed, return an empty list for `route`."
67
+ )
68
+
69
+ def convert_agents_to_routes(agents):
70
+ tools = [
71
+ {
72
+ "name": agent["name"],
73
+ "description": agent["description"],
74
+ }
75
+ for agent in agents
76
+ ]
77
+ return "\n".join([json.dumps(tool, ensure_ascii=False) for tool in tools])
78
+
79
+ def build_messages(available_agents, conversation):
80
+ routes = convert_agents_to_routes(available_agents)
81
+ conversation_str = json.dumps(conversation, indent=4, ensure_ascii=False)
82
+ prompt = ORCHESTRATION_PROMPT.format(routes=routes, conversation=conversation_str)
83
+ return [{"role": "user", "content": prompt}]
84
+
85
+ # Load model
86
+ model_name = "katanemo/Plano-Orchestrator-30B-A3B"
87
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
88
+ model = AutoModelForCausalLM.from_pretrained(
89
+ model_name,
90
+ torch_dtype=torch.float16,
91
+ device_map="auto"
92
+ )
93
+
94
+ # Define available agents
95
+ available_agents = [
96
+ {"name": "WeatherAgent", "description": "Provides weather forecasts and current conditions for any location"},
97
+ {"name": "CodeAgent", "description": "Generates, debugs, explains, and reviews code in multiple programming languages"}
98
+ ]
99
+
100
+ # Conversation history
101
+ conversation = [
102
+ {"role": "user", "content": "What's the weather like today?"},
103
+ {"role": "assistant", "content": "I can help you with that. Could you tell me your location?"},
104
+ {"role": "user", "content": "San Francisco"},
105
+ ]
106
+
107
+ # Build messages and generate
108
+ messages = build_messages(available_agents, conversation)
+ model_inputs = tokenizer.apply_chat_template(
109
+ messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
110
+ ).to(model.device)
111
+
112
+ generated_ids = model.generate(**model_inputs, max_new_tokens=32768)
113
+ generated_ids = [
114
+ output_ids[len(input_ids) :]
115
+ for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
116
+ ]
117
+
118
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
119
+ print(response)
120
+ # Output: {"route": ["WeatherAgent"]}
121
+ ```
122
+
123
+ ## License
124
+
125
+ The Plano-Orchestrator collection is distributed under the [Katanemo license](https://huggingface.co/katanemo/Plano-Orchestrator-30B-A3B/blob/main/LICENSE).
assets/Plano-Orchestrator.png ADDED

Git LFS Details

  • SHA256: 92325f361f49696440c5e919424565b10d1db59bcc23ab18c471ceef1d3857b4
  • Pointer size: 131 Bytes
  • Size of remote file: 209 kB
chat_template.jinja ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- for message in messages %}
18
+ {%- if message.content is string %}
19
+ {%- set content = message.content %}
20
+ {%- else %}
21
+ {%- set content = '' %}
22
+ {%- endif %}
23
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
24
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
25
+ {%- elif message.role == "assistant" %}
26
+ {{- '<|im_start|>' + message.role + '\n' + content }}
27
+ {%- if message.tool_calls %}
28
+ {%- for tool_call in message.tool_calls %}
29
+ {%- if (loop.first and content) or (not loop.first) %}
30
+ {{- '\n' }}
31
+ {%- endif %}
32
+ {%- if tool_call.function %}
33
+ {%- set tool_call = tool_call.function %}
34
+ {%- endif %}
35
+ {{- '<tool_call>\n{"name": "' }}
36
+ {{- tool_call.name }}
37
+ {{- '", "arguments": ' }}
38
+ {%- if tool_call.arguments is string %}
39
+ {{- tool_call.arguments }}
40
+ {%- else %}
41
+ {{- tool_call.arguments | tojson }}
42
+ {%- endif %}
43
+ {{- '}\n</tool_call>' }}
44
+ {%- endfor %}
45
+ {%- endif %}
46
+ {{- '<|im_end|>\n' }}
47
+ {%- elif message.role == "tool" %}
48
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
49
+ {{- '<|im_start|>user' }}
50
+ {%- endif %}
51
+ {{- '\n<tool_response>\n' }}
52
+ {{- content }}
53
+ {{- '\n</tool_response>' }}
54
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
55
+ {{- '<|im_end|>\n' }}
56
+ {%- endif %}
57
+ {%- endif %}
58
+ {%- endfor %}
59
+ {%- if add_generation_prompt %}
60
+ {{- '<|im_start|>assistant\n' }}
61
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen3MoeForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 151643,
8
+ "decoder_sparse_step": 1,
9
+ "eos_token_id": 151645,
10
+ "head_dim": 128,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 2048,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 6144,
15
+ "max_position_embeddings": 262144,
16
+ "max_window_layers": 48,
17
+ "mlp_only_layers": [],
18
+ "model_type": "qwen3_moe",
19
+ "moe_intermediate_size": 768,
20
+ "norm_topk_prob": true,
21
+ "num_attention_heads": 32,
22
+ "num_experts": 128,
23
+ "num_experts_per_tok": 8,
24
+ "num_hidden_layers": 48,
25
+ "num_key_value_heads": 4,
26
+ "output_router_logits": false,
27
+ "rms_norm_eps": 1e-06,
28
+ "rope_scaling": null,
29
+ "rope_theta": 10000000,
30
+ "router_aux_loss_coef": 0.001,
31
+ "sliding_window": null,
32
+ "tie_word_embeddings": false,
33
+ "torch_dtype": "bfloat16",
34
+ "transformers_version": "4.52.4",
35
+ "use_cache": true,
36
+ "use_sliding_window": false,
37
+ "vocab_size": 151936
38
+ }
generation_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "temperature": 0.7,
10
+ "top_k": 20,
11
+ "top_p": 0.8,
12
+ "transformers_version": "4.52.4"
13
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8eee33dff0a547b3862f08e27f1e4944bee9ceccb6418e406491a33b780a0376
3
+ size 4997184968
model-00002-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e4f64e2f916bc6bc2a473ebe25a3ec8e71d5beaa2bfe896440752bff706d345
3
+ size 4997741608
model-00003-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:102fec235d80fd8e1936813d53a8958dc08fe7127653833f38f0d5c17cea4b49
3
+ size 4997742208
model-00004-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b65f7982f12c649f3160ebf7e5c0d26be501d269ebe035bc390b94a67639df2
3
+ size 4997743184
model-00005-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0061945a1e1acf344bdb2677988a5d351ca236b1b42783ae9a23f39296b407ac
3
+ size 4997743184
model-00006-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef0af4bc624d6dbff9985cb1b1beb2d18d07c47763a911f618aef19bfe7e312c
3
+ size 4997743184
model-00007-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ef8c43b6fa66d7433e781e5292c22a408ade51bd98e2bf669d12eff6ef41ed1
3
+ size 4997743184
model-00008-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2565875f9da73bb91eedd5d9c22b232e7574635452737a8348ae1eb667cc30f
3
+ size 4997743184
model-00009-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:608755ac8e441b798b2fe5b5eb1b29378a77c8c798ad6deee14894c653e68bc5
3
+ size 4997743184
model-00010-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:937ac5288a8f6e06e7b5d62f42d867f64371231e3d2ad067ba3685f266b02f08
3
+ size 4997743184
model-00011-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcb296de73d5c386759cc728fc198c92cf5d93bcdfb78efee09daf3d95ee764e
3
+ size 4997743184
model-00012-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b40966b93c7d12615c815463bb03ea4fb4abb58497538618353d279e6efd9bee
3
+ size 4997743184
model-00013-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e18b553605082871839ca426e29ddb1cb149c1c0b0419b598cfbaba0417d21cf
3
+ size 1094220288
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 1010000,
235
+ "pad_token": "<|endoftext|>",
236
+ "padding_side": "left",
237
+ "split_special_tokens": false,
238
+ "tokenizer_class": "Qwen2Tokenizer",
239
+ "unk_token": null
240
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff