Training in progress, step 1
- config.json +3 -2
- model.safetensors +1 -1
- tokenizer_config.json +1 -4
- training_args.bin +1 -1
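This set of files and the commit message "Training in progress, step 1" match what the transformers Trainer pushes by default when it checkpoints with push_to_hub enabled. A minimal sketch of a setup that would produce such a commit, assuming the base checkpoint named in the config diff below and a hypothetical output repo name:

# Sketch only: the output_dir / Hub repo name and dataset are hypothetical placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments

base = "agentica-org/DeepScaleR-1.5B-Preview"  # base checkpoint named in config.json below

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

args = TrainingArguments(
    output_dir="deepscaler-finetune",   # hypothetical local dir / Hub repo name
    push_to_hub=True,                   # commit each saved checkpoint to the Hub
    save_steps=1,                       # a "step 1" commit implies saving this early
    logging_steps=1,
    per_device_train_batch_size=1,
)

# trainer = Trainer(model=model, args=args, train_dataset=..., tokenizer=tokenizer)
# trainer.train()  # pushes config.json, model.safetensors, tokenizer files, training_args.bin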
config.json CHANGED
@@ -1,10 +1,10 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "agentica-org/DeepScaleR-1.5B-Preview",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
   "attention_dropout": 0.0,
-  "bos_token_id":
+  "bos_token_id": 151646,
   "eos_token_id": 151643,
   "hidden_act": "silu",
   "hidden_size": 1536,
@@ -16,6 +16,7 @@
   "num_attention_heads": 12,
   "num_hidden_layers": 28,
   "num_key_value_heads": 2,
+  "pad_token_id": 151643,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 10000,
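The config change records the base checkpoint in "_name_or_path", sets "bos_token_id" to 151646, and adds "pad_token_id": 151643, so padding reuses the EOS id. A quick way to confirm those fields after loading, shown as a sketch with a hypothetical repo id standing in for wherever this commit was pushed:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("your-username/your-checkpoint-repo")  # hypothetical repo id

print(config.architectures)   # ['Qwen2ForCausalLM']
print(config.bos_token_id)    # 151646
print(config.eos_token_id)    # 151643
print(config.pad_token_id)    # 151643 -- newly added; padding reuses the EOS token id

Reusing the EOS id as pad_token_id is the usual choice when the tokenizer has no dedicated padding token, and it matches the "pad_token": "<|end▁of▁sentence|>" entry in tokenizer_config.json below.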
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3f7ac339edb0619d0d27a4fd377c02b800bff11f9d3faea65507c2f91aa8f8fc
 size 3554214752
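model.safetensors is tracked with Git LFS, so the repository stores only a pointer (spec version, sha256 oid, byte size); this commit swaps in the oid of the newly saved weights while the size stays at 3554214752 bytes. A sketch for verifying a downloaded copy against the pointer, assuming the file has been fetched into the working directory:

# Verify a local model.safetensors against the LFS pointer shown above.
import hashlib
import os

path = "model.safetensors"  # assumed local download of the pushed file
expected_oid = "3f7ac339edb0619d0d27a4fd377c02b800bff11f9d3faea65507c2f91aa8f8fc"
expected_size = 3554214752

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch with LFS pointer"
assert h.hexdigest() == expected_oid, "sha256 mismatch with LFS pointer"
print("model.safetensors matches the LFS pointer")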
tokenizer_config.json CHANGED
@@ -181,16 +181,13 @@
     }
   },
   "bos_token": "<|begin▁of▁sentence|>",
-  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|end▁of▁sentence|>",
   "extra_special_tokens": {},
   "legacy": true,
-  "max_length": null,
   "model_max_length": 16384,
-  "pad_to_multiple_of": null,
   "pad_token": "<|end▁of▁sentence|>",
-  "pad_token_type_id": 0,
   "padding_side": "left",
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
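The tokenizer change writes the chat template into tokenizer_config.json and drops the unused max_length, pad_to_multiple_of, and pad_token_type_id keys. The template prepends the BOS token and any system prompt, wraps turns in <|User|> / <|Assistant|> markers, and strips everything up to </think> from earlier assistant replies. A sketch of rendering it with apply_chat_template, again with a hypothetical repo id:

# Render the saved chat template; the repo id is a placeholder for the pushed checkpoint.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/your-checkpoint-repo")

messages = [
    {"role": "system", "content": "You are a careful math assistant."},
    {"role": "user", "content": "What is 12 * 7?"},
]

prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,   # appends the trailing '<|Assistant|>' marker
)
print(prompt)
# <|begin▁of▁sentence|>You are a careful math assistant.<|User|>What is 12 * 7?<|Assistant|>

With "padding_side": "left" and the pad token equal to EOS, batched generation pads on the left, which is what a decoder-only model like this expects.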
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:da4991abe6d2bd25cda2430cbc06f9f3763036b295c59c683f255514bd069733
 size 7224
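training_args.bin is the pickled TrainingArguments object the Trainer saves alongside each checkpoint, also stored as an LFS pointer (7224 bytes here). A sketch for inspecting the hyperparameters it carries, assuming a local download from a repository you trust, since unpickling executes arbitrary code:

# training_args.bin is a pickled TrainingArguments object; only load it from a trusted repo.
import torch

args = torch.load("training_args.bin", weights_only=False)  # assumed local download

print(type(args).__name__)               # TrainingArguments
print(args.learning_rate)                # standard TrainingArguments fields
print(args.per_device_train_batch_size)
print(args.num_train_epochs)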