{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "166100": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "166101": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "166102": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "166103": {
      "content": "<think>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "166104": {
      "content": "</think>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "166105": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "166106": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<|im_start|>",
| "chat_template": "\n {%- if tools %}\n {{- '<|im_start|>system\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\n\n' }}\n {%- else %} \n {{- '你是一位工具函数调用专家,你会得到一个问题和一组可能的工具函数。根据问题,你需要进行一个或多个函数/工具调用以实现目的,请尽量尝试探索通过工具解决问题。\n如果没有一个函数可以使用,请直接使用自然语言回复用户。\n如果给定的问题缺少函数所需的参数,请使用自然语言进行提问,向用户询问必要信息。\n如果调用结果已经足够回答用户问题,请对历史结果进行总结,使用自然语言回复用户。' }} \n {%- endif %}\n {{- \"# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\n</tool_call><|im_end|>\n\" }}\n {%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}\n {%- else %} \n {{- '<|im_start|>system\n你是南北阁,一款由BOSS直聘自主研发并训练的专业大语言模型。<|im_end|>\n' }} \n {%- endif %}\n {%- endif %}\n {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n {%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n {%- endfor %}\n {%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- 
'\n<tool_response>\n' }}\n {{- content }}\n {{- '\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\n' }}\n {%- endif %}\n {%- endif %}\n {%- endfor %}\n {%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n' }}\n {%- endif %}\n", | |
| "clean_up_tokenization_spaces": false, | |
| "eos_token": "<|im_end|>", | |
| "extra_special_tokens": {}, | |
| "legacy": true, | |
| "model_max_length": 1000000000000000019884624838656, | |
| "pad_token": "<unk>", | |
| "sp_model_kwargs": {}, | |
| "spaces_between_special_tokens": false, | |
| "tokenizer_class": "LlamaTokenizer", | |
| "unk_token": "<unk>", | |
| "use_default_system_prompt": false | |
| } |
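
A minimal Python usage sketch of how this configuration is consumed, assuming a recent transformers version that supports the tools argument of apply_chat_template; the model directory "./nanbeige-model" and the get_weather tool are placeholder assumptions, not part of this config. from_pretrained reads the settings above, and apply_chat_template renders the Jinja chat_template, wrapping each turn in <|im_start|>/<|im_end|>. Without tools, the Chinese default system prompt identifies the model as Nanbeige (南北阁), developed by BOSS Zhipin (BOSS直聘); with tools, the template switches to a tool-calling system prompt and lists each tool's JSON schema inside <tools></tools> XML tags.

# Sketch only: assumes this file sits in a model directory alongside the
# matching sentencepiece/vocab files; "./nanbeige-model" is a placeholder path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./nanbeige-model")

messages = [{"role": "user", "content": "What is the capital of France?"}]

# Without tools: the template emits the default system prompt, the user turn,
# and a trailing "<|im_start|>assistant\n" because add_generation_prompt=True.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)

# With tools: the template emits the tool-calling system prompt and the tool
# schemas inside <tools></tools>. This tool is a hypothetical example.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]
prompt_with_tools = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(prompt_with_tools)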