wyuc committed
Commit a0c3985 · verified · 1 Parent(s): a5bd87b

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: THU-KEG/LongWriter-V-7B
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: LongWriter-V-7B-DPO
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # LongWriter-V-7B-DPO
+
+ This model is a fine-tuned version of [THU-KEG/LongWriter-V-7B](https://huggingface.co/THU-KEG/LongWriter-V-7B) on the LongWriter-V-DPO dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-06
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 64
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.49.0.dev0
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
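
Since the card above is auto-generated, a brief usage sketch may help; it is a hypothetical illustration, not part of the commit. It assumes the uploaded files are served under the repo id `THU-KEG/LongWriter-V-7B-DPO` and that your local transformers build matches the `4.49.0.dev0` line above (i.e. it includes Qwen2.5-VL support).

```python
# Hypothetical load-and-generate sketch; repo id and decoding settings are assumptions.
import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

repo_id = "THU-KEG/LongWriter-V-7B-DPO"  # assumed repo id for this upload
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(repo_id)

# Text-only prompt; the chat template (see chat_template.json below) also
# accepts image/video content items.
messages = [{"role": "user", "content": "Write a 2,000-word travel essay about the Alps."}]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[prompt], return_tensors="pt").to(model.device)

out = model.generate(**inputs, max_new_tokens=4096)
print(processor.batch_decode(out[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)[0])
```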
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.943820224719101,
+   "total_flos": 228521444442112.0,
+   "train_loss": 0.17045999738028772,
+   "train_runtime": 5166.54,
+   "train_samples_per_second": 1.651,
+   "train_steps_per_second": 0.026
+ }
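
As a quick consistency check on these metrics (pure arithmetic on the values above):

```python
# Implied step and sample counts from all_results.json; values rounded.
runtime_s = 5166.54
steps = 0.026 * runtime_s                 # ~134 optimizer steps
samples = 1.651 * runtime_s               # ~8,530 training samples seen
per_epoch = samples / 2.943820224719101   # ~2,900 samples per epoch
print(round(steps), round(samples), round(per_epoch))
```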
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+ }
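
To make the template concrete, here is a small rendering sketch; it is an illustration only, assuming `jinja2` is installed and a local copy of this `chat_template.json` sits in the working directory. The template injects a default system turn, wraps each message in `<|im_start|>`/`<|im_end|>`, and replaces image/video items with vision pad tokens:

```python
# Render the chat template above with plain Jinja2 (HF's apply_chat_template
# does the same thing through a sandboxed environment).
import json
from jinja2 import Template

template = Template(json.load(open("chat_template.json"))["chat_template"])
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this picture."},
    ]},
]
print(template.render(messages=messages, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>Describe this picture.<|im_end|>
# <|im_start|>assistant
```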
config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "_name_or_path": "THU-KEG/LongWriter-V-7B",
+   "architectures": [
+     "Qwen2_5_VLForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "image_token_id": 151655,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "max_position_embeddings": 128000,
+   "max_window_layers": 28,
+   "model_type": "qwen2_5_vl",
+   "num_attention_heads": 28,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "mrope_section": [
+       16,
+       24,
+       24
+     ],
+     "rope_type": "default",
+     "type": "default"
+   },
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0.dev0",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "video_token_id": 151656,
+   "vision_config": {
+     "hidden_size": 1280,
+     "in_chans": 3,
+     "model_type": "qwen2_5_vl",
+     "spatial_patch_size": 14,
+     "tokens_per_second": 2,
+     "torch_dtype": "float32"
+   },
+   "vision_end_token_id": 151653,
+   "vision_start_token_id": 151652,
+   "vision_token_id": 151654,
+   "vocab_size": 152064
+ }
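
A sketch for inspecting this config without downloading the weight shards (again assuming the repo id `THU-KEG/LongWriter-V-7B-DPO` and a transformers build that knows the `qwen2_5_vl` model type):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("THU-KEG/LongWriter-V-7B-DPO")  # assumed repo id
print(cfg.model_type)                 # qwen2_5_vl
print(cfg.rope_scaling)               # mrope_section [16, 24, 24], per config.json above
print(cfg.vision_config.hidden_size)  # 1280
```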
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "temperature": 0.1,
+   "top_k": 1,
+   "top_p": 0.001,
+   "transformers_version": "4.49.0.dev0"
+ }
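
Note that with `top_k` of 1 and `top_p` of 0.001, these defaults are effectively greedy decoding despite `do_sample: true`. Per-call arguments override them; a sketch reusing the hypothetical `model` and `inputs` from the README snippet above, with illustrative values that are not from this commit:

```python
# generate() kwargs take precedence over generation_config.json defaults.
out = model.generate(
    **inputs,
    max_new_tokens=4096,
    do_sample=True,
    temperature=0.7,  # illustrative override
    top_p=0.9,
    top_k=50,
)
```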
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:916e139fac57a47f4c2481c5910153a90539ca23d84f1443ff47ae0c1988eb8f
+ size 4968243304
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d773d20349f25b65efe78714b5e6e14427ff340e88d98a275922148a5ce8031
+ size 4991495816
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e75d6d8ea67dedec978f6b506363a1ee3bc58d41bf6c8abdd6205d2ca7f1b0e
+ size 4932751040
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb0700f0d2c142464b1ce84ee39c06701b1e12efd021d553fc4fdf13325c4f8b
+ size 1691924384
model.safetensors.index.json ADDED
@@ -0,0 +1,736 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16584333312
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
19
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
30
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
31
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
32
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
41
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
43
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
50
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
53
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
55
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
62
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
65
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
67
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
74
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
77
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
79
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
86
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
89
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
91
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
98
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
101
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
102
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
103
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
105
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
106
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
107
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
108
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
109
+ "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
110
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
113
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
114
+ "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
115
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
117
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
118
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
119
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
120
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
121
+ "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
122
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
123
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
124
+ "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
125
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
126
+ "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
127
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
128
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
131
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
132
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
133
+ "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
134
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
135
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
137
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
138
+ "model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
139
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
140
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
146
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
147
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
149
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
151
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
152
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
153
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
154
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
155
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
156
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
157
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
158
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
159
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
160
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
161
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
162
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
163
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
164
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
170
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
171
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
172
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
173
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
174
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
175
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
181
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
182
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
183
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
184
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
185
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
187
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
194
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
197
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
199
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
206
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
209
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
210
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
211
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
217
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
218
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
219
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
220
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
221
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
222
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
223
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
224
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
230
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
233
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
234
+ "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
235
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
237
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
238
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
241
+ "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
242
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
243
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
245
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
246
+ "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
247
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
248
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
249
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
250
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
251
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
252
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
253
+ "model.layers.27.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
254
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
255
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
256
+ "model.layers.27.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
257
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
258
+ "model.layers.27.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
259
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
260
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
261
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
262
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
263
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
264
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
265
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
266
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
267
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
268
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
269
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
270
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
271
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
272
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
273
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
274
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
275
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
276
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
277
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
278
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
279
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
280
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
281
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
282
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
283
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
284
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
285
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
286
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
287
+ "model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
288
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
289
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
290
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
291
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
292
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
293
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
294
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
295
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
296
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
297
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
298
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
299
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
300
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
301
+ "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
302
+ "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
303
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
304
+ "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
305
+ "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
306
+ "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
307
+ "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
308
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
309
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
310
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
311
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
312
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
313
+ "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
314
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
315
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
316
+ "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
317
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
318
+ "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
319
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
320
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
321
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
322
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
323
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
324
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
325
+ "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
326
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
327
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
328
+ "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
329
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
330
+ "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
331
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
332
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
333
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
334
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
335
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
336
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
337
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
338
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
339
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
340
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
341
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
342
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
343
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
344
+ "model.norm.weight": "model-00004-of-00004.safetensors",
345
+ "visual.blocks.0.attn.proj.bias": "model-00001-of-00004.safetensors",
346
+ "visual.blocks.0.attn.proj.weight": "model-00001-of-00004.safetensors",
347
+ "visual.blocks.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
348
+ "visual.blocks.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
349
+ "visual.blocks.0.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
350
+ "visual.blocks.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
351
+ "visual.blocks.0.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
352
+ "visual.blocks.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
353
+ "visual.blocks.0.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
354
+ "visual.blocks.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
355
+ "visual.blocks.0.norm1.weight": "model-00001-of-00004.safetensors",
356
+ "visual.blocks.0.norm2.weight": "model-00001-of-00004.safetensors",
357
+ "visual.blocks.1.attn.proj.bias": "model-00001-of-00004.safetensors",
358
+ "visual.blocks.1.attn.proj.weight": "model-00001-of-00004.safetensors",
359
+ "visual.blocks.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
360
+ "visual.blocks.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
361
+ "visual.blocks.1.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
362
+ "visual.blocks.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
363
+ "visual.blocks.1.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
364
+ "visual.blocks.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
365
+ "visual.blocks.1.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
366
+ "visual.blocks.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
367
+ "visual.blocks.1.norm1.weight": "model-00001-of-00004.safetensors",
368
+ "visual.blocks.1.norm2.weight": "model-00001-of-00004.safetensors",
369
+ "visual.blocks.10.attn.proj.bias": "model-00001-of-00004.safetensors",
370
+ "visual.blocks.10.attn.proj.weight": "model-00001-of-00004.safetensors",
371
+ "visual.blocks.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
372
+ "visual.blocks.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
373
+ "visual.blocks.10.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
374
+ "visual.blocks.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
375
+ "visual.blocks.10.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
376
+ "visual.blocks.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
377
+ "visual.blocks.10.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
378
+ "visual.blocks.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
379
+ "visual.blocks.10.norm1.weight": "model-00001-of-00004.safetensors",
380
+ "visual.blocks.10.norm2.weight": "model-00001-of-00004.safetensors",
381
+ "visual.blocks.11.attn.proj.bias": "model-00001-of-00004.safetensors",
382
+ "visual.blocks.11.attn.proj.weight": "model-00001-of-00004.safetensors",
383
+ "visual.blocks.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
384
+ "visual.blocks.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
385
+ "visual.blocks.11.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
386
+ "visual.blocks.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
387
+ "visual.blocks.11.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
388
+ "visual.blocks.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
389
+ "visual.blocks.11.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
390
+ "visual.blocks.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
391
+ "visual.blocks.11.norm1.weight": "model-00001-of-00004.safetensors",
392
+ "visual.blocks.11.norm2.weight": "model-00001-of-00004.safetensors",
393
+ "visual.blocks.12.attn.proj.bias": "model-00001-of-00004.safetensors",
394
+ "visual.blocks.12.attn.proj.weight": "model-00001-of-00004.safetensors",
395
+ "visual.blocks.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
396
+ "visual.blocks.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
397
+ "visual.blocks.12.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
398
+ "visual.blocks.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
399
+ "visual.blocks.12.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
400
+ "visual.blocks.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
401
+ "visual.blocks.12.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
402
+ "visual.blocks.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
403
+ "visual.blocks.12.norm1.weight": "model-00001-of-00004.safetensors",
404
+ "visual.blocks.12.norm2.weight": "model-00001-of-00004.safetensors",
405
+ "visual.blocks.13.attn.proj.bias": "model-00001-of-00004.safetensors",
406
+ "visual.blocks.13.attn.proj.weight": "model-00001-of-00004.safetensors",
407
+ "visual.blocks.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
408
+ "visual.blocks.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
409
+ "visual.blocks.13.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
410
+ "visual.blocks.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
411
+ "visual.blocks.13.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
412
+ "visual.blocks.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
413
+ "visual.blocks.13.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
414
+ "visual.blocks.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
415
+ "visual.blocks.13.norm1.weight": "model-00001-of-00004.safetensors",
416
+ "visual.blocks.13.norm2.weight": "model-00001-of-00004.safetensors",
417
+ "visual.blocks.14.attn.proj.bias": "model-00001-of-00004.safetensors",
418
+ "visual.blocks.14.attn.proj.weight": "model-00001-of-00004.safetensors",
419
+ "visual.blocks.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
420
+ "visual.blocks.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
421
+ "visual.blocks.14.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
422
+ "visual.blocks.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
423
+ "visual.blocks.14.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
424
+ "visual.blocks.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
425
+ "visual.blocks.14.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
426
+ "visual.blocks.14.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
427
+ "visual.blocks.14.norm1.weight": "model-00001-of-00004.safetensors",
428
+ "visual.blocks.14.norm2.weight": "model-00001-of-00004.safetensors",
429
+ "visual.blocks.15.attn.proj.bias": "model-00001-of-00004.safetensors",
430
+ "visual.blocks.15.attn.proj.weight": "model-00001-of-00004.safetensors",
431
+ "visual.blocks.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
432
+ "visual.blocks.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
433
+ "visual.blocks.15.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
434
+ "visual.blocks.15.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
435
+ "visual.blocks.15.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
436
+ "visual.blocks.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
437
+ "visual.blocks.15.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
438
+ "visual.blocks.15.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
439
+ "visual.blocks.15.norm1.weight": "model-00001-of-00004.safetensors",
440
+ "visual.blocks.15.norm2.weight": "model-00001-of-00004.safetensors",
441
+ "visual.blocks.16.attn.proj.bias": "model-00001-of-00004.safetensors",
442
+ "visual.blocks.16.attn.proj.weight": "model-00001-of-00004.safetensors",
443
+ "visual.blocks.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
444
+ "visual.blocks.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
445
+ "visual.blocks.16.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
446
+ "visual.blocks.16.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
447
+ "visual.blocks.16.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
448
+ "visual.blocks.16.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
449
+ "visual.blocks.16.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
450
+ "visual.blocks.16.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
451
+ "visual.blocks.16.norm1.weight": "model-00001-of-00004.safetensors",
452
+ "visual.blocks.16.norm2.weight": "model-00001-of-00004.safetensors",
453
+ "visual.blocks.17.attn.proj.bias": "model-00001-of-00004.safetensors",
454
+ "visual.blocks.17.attn.proj.weight": "model-00001-of-00004.safetensors",
455
+ "visual.blocks.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
456
+ "visual.blocks.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
457
+ "visual.blocks.17.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
458
+ "visual.blocks.17.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
459
+ "visual.blocks.17.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
460
+ "visual.blocks.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
461
+ "visual.blocks.17.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
462
+ "visual.blocks.17.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
463
+ "visual.blocks.17.norm1.weight": "model-00001-of-00004.safetensors",
464
+ "visual.blocks.17.norm2.weight": "model-00001-of-00004.safetensors",
465
+ "visual.blocks.18.attn.proj.bias": "model-00001-of-00004.safetensors",
466
+ "visual.blocks.18.attn.proj.weight": "model-00001-of-00004.safetensors",
467
+ "visual.blocks.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
468
+ "visual.blocks.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
469
+ "visual.blocks.18.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
470
+ "visual.blocks.18.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
471
+ "visual.blocks.18.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
472
+ "visual.blocks.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
473
+ "visual.blocks.18.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
474
+ "visual.blocks.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
475
+ "visual.blocks.18.norm1.weight": "model-00001-of-00004.safetensors",
476
+ "visual.blocks.18.norm2.weight": "model-00001-of-00004.safetensors",
477
+ "visual.blocks.19.attn.proj.bias": "model-00001-of-00004.safetensors",
478
+ "visual.blocks.19.attn.proj.weight": "model-00001-of-00004.safetensors",
479
+ "visual.blocks.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
480
+ "visual.blocks.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
481
+ "visual.blocks.19.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
482
+ "visual.blocks.19.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
483
+ "visual.blocks.19.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
484
+ "visual.blocks.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
485
+ "visual.blocks.19.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
486
+ "visual.blocks.19.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
487
+ "visual.blocks.19.norm1.weight": "model-00001-of-00004.safetensors",
488
+ "visual.blocks.19.norm2.weight": "model-00001-of-00004.safetensors",
489
+ "visual.blocks.2.attn.proj.bias": "model-00001-of-00004.safetensors",
490
+ "visual.blocks.2.attn.proj.weight": "model-00001-of-00004.safetensors",
491
+ "visual.blocks.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
492
+ "visual.blocks.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
493
+ "visual.blocks.2.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
494
+ "visual.blocks.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
495
+ "visual.blocks.2.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
496
+ "visual.blocks.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
497
+ "visual.blocks.2.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
498
+ "visual.blocks.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
499
+ "visual.blocks.2.norm1.weight": "model-00001-of-00004.safetensors",
500
+ "visual.blocks.2.norm2.weight": "model-00001-of-00004.safetensors",
501
+ "visual.blocks.20.attn.proj.bias": "model-00001-of-00004.safetensors",
502
+ "visual.blocks.20.attn.proj.weight": "model-00001-of-00004.safetensors",
503
+ "visual.blocks.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
504
+ "visual.blocks.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
505
+ "visual.blocks.20.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
506
+ "visual.blocks.20.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
507
+ "visual.blocks.20.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
508
+ "visual.blocks.20.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
509
+ "visual.blocks.20.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
510
+ "visual.blocks.20.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
511
+ "visual.blocks.20.norm1.weight": "model-00001-of-00004.safetensors",
512
+ "visual.blocks.20.norm2.weight": "model-00001-of-00004.safetensors",
513
+ "visual.blocks.21.attn.proj.bias": "model-00001-of-00004.safetensors",
514
+ "visual.blocks.21.attn.proj.weight": "model-00001-of-00004.safetensors",
515
+ "visual.blocks.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
516
+ "visual.blocks.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
517
+ "visual.blocks.21.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
518
+ "visual.blocks.21.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
519
+ "visual.blocks.21.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
520
+ "visual.blocks.21.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
521
+ "visual.blocks.21.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
522
+ "visual.blocks.21.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
523
+ "visual.blocks.21.norm1.weight": "model-00001-of-00004.safetensors",
524
+ "visual.blocks.21.norm2.weight": "model-00001-of-00004.safetensors",
525
+ "visual.blocks.22.attn.proj.bias": "model-00001-of-00004.safetensors",
526
+ "visual.blocks.22.attn.proj.weight": "model-00001-of-00004.safetensors",
527
+ "visual.blocks.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
528
+ "visual.blocks.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
529
+ "visual.blocks.22.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
530
+ "visual.blocks.22.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
531
+ "visual.blocks.22.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
532
+ "visual.blocks.22.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
533
+ "visual.blocks.22.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
534
+ "visual.blocks.22.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
535
+ "visual.blocks.22.norm1.weight": "model-00001-of-00004.safetensors",
536
+ "visual.blocks.22.norm2.weight": "model-00001-of-00004.safetensors",
537
+ "visual.blocks.23.attn.proj.bias": "model-00001-of-00004.safetensors",
538
+ "visual.blocks.23.attn.proj.weight": "model-00001-of-00004.safetensors",
539
+ "visual.blocks.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
540
+ "visual.blocks.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
541
+ "visual.blocks.23.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
542
+ "visual.blocks.23.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
543
+ "visual.blocks.23.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
544
+ "visual.blocks.23.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
545
+ "visual.blocks.23.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
546
+ "visual.blocks.23.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
547
+ "visual.blocks.23.norm1.weight": "model-00001-of-00004.safetensors",
548
+ "visual.blocks.23.norm2.weight": "model-00001-of-00004.safetensors",
549
+ "visual.blocks.24.attn.proj.bias": "model-00001-of-00004.safetensors",
550
+ "visual.blocks.24.attn.proj.weight": "model-00001-of-00004.safetensors",
551
+ "visual.blocks.24.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.24.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.25.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.26.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.27.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.28.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.29.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.3.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.30.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.31.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.4.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.5.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.6.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.7.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.8.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.attn.proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.attn.proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.norm1.weight": "model-00001-of-00004.safetensors",
+ "visual.blocks.9.norm2.weight": "model-00001-of-00004.safetensors",
+ "visual.merger.ln_q.weight": "model-00001-of-00004.safetensors",
+ "visual.merger.mlp.0.bias": "model-00001-of-00004.safetensors",
+ "visual.merger.mlp.0.weight": "model-00001-of-00004.safetensors",
+ "visual.merger.mlp.2.bias": "model-00001-of-00004.safetensors",
+ "visual.merger.mlp.2.weight": "model-00001-of-00004.safetensors",
+ "visual.patch_embed.proj.weight": "model-00001-of-00004.safetensors"
+ }
+ }
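Every `visual.*` tensor in the index resolves to the first shard, i.e. the entire Qwen2.5-VL vision tower (patch embed, 32 transformer blocks, merger) lives in `model-00001-of-00004.safetensors`. A minimal sketch of confirming this from the index file, assuming the repo snapshot has been downloaded into the working directory (`weight_map` is the standard safetensors-index key):

```python
import json

# Assumes model.safetensors.index.json sits in the current directory.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]  # tensor name -> shard filename
vision = {k: v for k, v in weight_map.items() if k.startswith("visual.")}
print(len(vision), "vision tensors in shards:", sorted(set(vision.values())))
# Expected from the entries above: every vision tensor maps to
# model-00001-of-00004.safetensors
```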
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "Qwen2_5_VLImageProcessor",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "max_pixels": 12845056,
+ "merge_size": 2,
+ "min_pixels": 3136,
+ "patch_size": 14,
+ "processor_class": "Qwen2_5_VLProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "longest_edge": 12845056,
+ "shortest_edge": 3136
+ },
+ "temporal_patch_size": 2
+ }
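With `patch_size` 14 and `merge_size` 2, each vision token the language model sees covers a 28×28-pixel area, so `min_pixels`/`max_pixels` bound the per-image token count. A back-of-the-envelope check using only the values above:

```python
patch_size = 14   # ViT patch edge in pixels
merge_size = 2    # 2x2 spatial merge before the language model
pixels_per_token = (patch_size * merge_size) ** 2  # 28 * 28 = 784

print(3136 // pixels_per_token)      # min_pixels  -> 4 vision tokens
print(12845056 // pixels_per_token)  # max_pixels  -> 16384 vision tokens
```

For Qwen2-VL-style processors these bounds can usually be tightened at load time (e.g. passing `min_pixels`/`max_pixels` to `AutoProcessor.from_pretrained`) when GPU memory is limited.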
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
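`tokenizer.json` is stored as a Git LFS pointer: the commit tracks only the object's SHA-256 and size, and `git lfs pull` (or the Hub client) fetches the ~11.4 MB payload. Since the LFS `oid` is the SHA-256 of the resolved file's contents, a quick integrity check after pulling looks like this (a minimal sketch, assuming the real file has replaced the pointer locally):

```python
import hashlib

# The LFS oid is the SHA-256 digest of the resolved file's bytes.
digest = hashlib.sha256(open("tokenizer.json", "rb").read()).hexdigest()
assert digest == "9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa"
```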
tokenizer_config.json ADDED
@@ -0,0 +1,210 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "processor_class": "Qwen2_5_VLProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
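The `chat_template` above is the Qwen2.5 ChatML template with tool-call support: a default system turn, `<|im_start|>`/`<|im_end|>` message framing, and tool calls serialized as JSON inside `<tool_call>` tags. A minimal sketch of rendering it with `apply_chat_template` (the repo id is assumed from the model card; any checkpoint shipping this config behaves identically):

```python
from transformers import AutoTokenizer

# Assumed Hub repo id for this checkpoint.
tok = AutoTokenizer.from_pretrained("THU-KEG/LongWriter-V-7B-DPO")

messages = [{"role": "user", "content": "Outline a 3000-word report on tidal energy."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Outline a 3000-word report on tidal energy.<|im_end|>
# <|im_start|>assistant
```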
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 2.943820224719101,
+ "total_flos": 228521444442112.0,
+ "train_loss": 0.17045999738028772,
+ "train_runtime": 5166.54,
+ "train_samples_per_second": 1.651,
+ "train_steps_per_second": 0.026
+ }
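These aggregates are internally consistent with the per-step log that follows: multiplying the runtime by the (rounded) throughput rates roughly recovers the step and sample counts. A quick arithmetic check:

```python
train_runtime = 5166.54  # seconds, from the summary above

print(round(train_runtime * 0.026))  # ~134 vs. 132 logged steps (rate is rounded)
print(round(train_runtime * 1.651))  # ~8530 preference pairs processed over ~3 epochs
```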
trainer_log.jsonl ADDED
@@ -0,0 +1,133 @@
+ {"current_steps": 1, "total_steps": 132, "loss": 0.6931, "accuracy": 0.0, "lr": 2.1428571428571428e-07, "epoch": 0.02247191011235955, "percentage": 0.76, "elapsed_time": "0:00:57", "remaining_time": "2:04:55"}
+ {"current_steps": 2, "total_steps": 132, "loss": 0.6931, "accuracy": 0.0, "lr": 4.2857142857142857e-07, "epoch": 0.0449438202247191, "percentage": 1.52, "elapsed_time": "0:01:46", "remaining_time": "1:55:00"}
+ {"current_steps": 3, "total_steps": 132, "loss": 0.9204, "accuracy": 0.46875, "lr": 6.428571428571428e-07, "epoch": 0.06741573033707865, "percentage": 2.27, "elapsed_time": "0:02:22", "remaining_time": "1:42:27"}
+ {"current_steps": 4, "total_steps": 132, "loss": 0.6588, "accuracy": 0.546875, "lr": 8.571428571428571e-07, "epoch": 0.0898876404494382, "percentage": 3.03, "elapsed_time": "0:03:00", "remaining_time": "1:36:15"}
+ {"current_steps": 5, "total_steps": 132, "loss": 0.5615, "accuracy": 0.6875, "lr": 1.0714285714285716e-06, "epoch": 0.11235955056179775, "percentage": 3.79, "elapsed_time": "0:03:38", "remaining_time": "1:32:19"}
+ {"current_steps": 6, "total_steps": 132, "loss": 0.5469, "accuracy": 0.71875, "lr": 1.2857142857142856e-06, "epoch": 0.1348314606741573, "percentage": 4.55, "elapsed_time": "0:04:20", "remaining_time": "1:31:03"}
+ {"current_steps": 7, "total_steps": 132, "loss": 0.4709, "accuracy": 0.734375, "lr": 1.5e-06, "epoch": 0.15730337078651685, "percentage": 5.3, "elapsed_time": "0:05:00", "remaining_time": "1:29:27"}
+ {"current_steps": 8, "total_steps": 132, "loss": 0.396, "accuracy": 0.75, "lr": 1.7142857142857143e-06, "epoch": 0.1797752808988764, "percentage": 6.06, "elapsed_time": "0:05:38", "remaining_time": "1:27:34"}
+ {"current_steps": 9, "total_steps": 132, "loss": 0.448, "accuracy": 0.71875, "lr": 1.928571428571429e-06, "epoch": 0.20224719101123595, "percentage": 6.82, "elapsed_time": "0:06:15", "remaining_time": "1:25:31"}
+ {"current_steps": 10, "total_steps": 132, "loss": 0.3618, "accuracy": 0.765625, "lr": 2.142857142857143e-06, "epoch": 0.2247191011235955, "percentage": 7.58, "elapsed_time": "0:06:52", "remaining_time": "1:23:53"}
+ {"current_steps": 11, "total_steps": 132, "loss": 0.3962, "accuracy": 0.703125, "lr": 2.357142857142857e-06, "epoch": 0.24719101123595505, "percentage": 8.33, "elapsed_time": "0:07:33", "remaining_time": "1:23:13"}
+ {"current_steps": 12, "total_steps": 132, "loss": 0.4155, "accuracy": 0.6875, "lr": 2.571428571428571e-06, "epoch": 0.2696629213483146, "percentage": 9.09, "elapsed_time": "0:08:16", "remaining_time": "1:22:45"}
+ {"current_steps": 13, "total_steps": 132, "loss": 0.3343, "accuracy": 0.75, "lr": 2.785714285714286e-06, "epoch": 0.29213483146067415, "percentage": 9.85, "elapsed_time": "0:08:54", "remaining_time": "1:21:35"}
+ {"current_steps": 14, "total_steps": 132, "loss": 0.3189, "accuracy": 0.890625, "lr": 3e-06, "epoch": 0.3146067415730337, "percentage": 10.61, "elapsed_time": "0:09:33", "remaining_time": "1:20:32"}
+ {"current_steps": 15, "total_steps": 132, "loss": 0.3903, "accuracy": 0.765625, "lr": 2.999468416685179e-06, "epoch": 0.33707865168539325, "percentage": 11.36, "elapsed_time": "0:10:15", "remaining_time": "1:20:00"}
+ {"current_steps": 16, "total_steps": 132, "loss": 0.3059, "accuracy": 0.78125, "lr": 2.9978740435151427e-06, "epoch": 0.3595505617977528, "percentage": 12.12, "elapsed_time": "0:10:53", "remaining_time": "1:18:59"}
+ {"current_steps": 17, "total_steps": 132, "loss": 0.3808, "accuracy": 0.75, "lr": 2.995218010546125e-06, "epoch": 0.38202247191011235, "percentage": 12.88, "elapsed_time": "0:11:30", "remaining_time": "1:17:52"}
+ {"current_steps": 18, "total_steps": 132, "loss": 0.3542, "accuracy": 0.8125, "lr": 2.9915022003152055e-06, "epoch": 0.4044943820224719, "percentage": 13.64, "elapsed_time": "0:12:08", "remaining_time": "1:16:51"}
+ {"current_steps": 19, "total_steps": 132, "loss": 0.367, "accuracy": 0.765625, "lr": 2.986729246506011e-06, "epoch": 0.42696629213483145, "percentage": 14.39, "elapsed_time": "0:12:42", "remaining_time": "1:15:36"}
+ {"current_steps": 20, "total_steps": 132, "loss": 0.4946, "accuracy": 0.75, "lr": 2.980902532082017e-06, "epoch": 0.449438202247191, "percentage": 15.15, "elapsed_time": "0:13:16", "remaining_time": "1:14:17"}
+ {"current_steps": 21, "total_steps": 132, "loss": 0.4802, "accuracy": 0.71875, "lr": 2.9740261868887817e-06, "epoch": 0.47191011235955055, "percentage": 15.91, "elapsed_time": "0:13:53", "remaining_time": "1:13:26"}
+ {"current_steps": 22, "total_steps": 132, "loss": 0.4924, "accuracy": 0.828125, "lr": 2.9661050847268e-06, "epoch": 0.4943820224719101, "percentage": 16.67, "elapsed_time": "0:14:32", "remaining_time": "1:12:41"}
+ {"current_steps": 23, "total_steps": 132, "loss": 0.4932, "accuracy": 0.734375, "lr": 2.957144839897065e-06, "epoch": 0.5168539325842697, "percentage": 17.42, "elapsed_time": "0:15:09", "remaining_time": "1:11:51"}
+ {"current_steps": 24, "total_steps": 132, "loss": 0.3869, "accuracy": 0.796875, "lr": 2.947151803221774e-06, "epoch": 0.5393258426966292, "percentage": 18.18, "elapsed_time": "0:15:54", "remaining_time": "1:11:35"}
+ {"current_steps": 25, "total_steps": 132, "loss": 0.3981, "accuracy": 0.78125, "lr": 2.936133057543008e-06, "epoch": 0.5617977528089888, "percentage": 18.94, "elapsed_time": "0:16:29", "remaining_time": "1:10:35"}
+ {"current_steps": 26, "total_steps": 132, "loss": 0.613, "accuracy": 0.703125, "lr": 2.924096412702572e-06, "epoch": 0.5842696629213483, "percentage": 19.7, "elapsed_time": "0:17:07", "remaining_time": "1:09:48"}
+ {"current_steps": 27, "total_steps": 132, "loss": 0.4005, "accuracy": 0.859375, "lr": 2.91105040000655e-06, "epoch": 0.6067415730337079, "percentage": 20.45, "elapsed_time": "0:17:46", "remaining_time": "1:09:08"}
+ {"current_steps": 28, "total_steps": 132, "loss": 0.522, "accuracy": 0.765625, "lr": 2.897004266178508e-06, "epoch": 0.6292134831460674, "percentage": 21.21, "elapsed_time": "0:18:28", "remaining_time": "1:08:37"}
+ {"current_steps": 29, "total_steps": 132, "loss": 0.3816, "accuracy": 0.765625, "lr": 2.8819679668056195e-06, "epoch": 0.651685393258427, "percentage": 21.97, "elapsed_time": "0:19:05", "remaining_time": "1:07:49"}
+ {"current_steps": 30, "total_steps": 132, "loss": 0.4913, "accuracy": 0.765625, "lr": 2.8659521592823702e-06, "epoch": 0.6741573033707865, "percentage": 22.73, "elapsed_time": "0:19:43", "remaining_time": "1:07:04"}
+ {"current_steps": 31, "total_steps": 132, "loss": 0.708, "accuracy": 0.65625, "lr": 2.848968195256829e-06, "epoch": 0.6966292134831461, "percentage": 23.48, "elapsed_time": "0:20:25", "remaining_time": "1:06:31"}
+ {"current_steps": 32, "total_steps": 132, "loss": 0.5514, "accuracy": 0.6875, "lr": 2.831028112584857e-06, "epoch": 0.7191011235955056, "percentage": 24.24, "elapsed_time": "0:21:02", "remaining_time": "1:05:45"}
+ {"current_steps": 33, "total_steps": 132, "loss": 0.4043, "accuracy": 0.875, "lr": 2.812144626797942e-06, "epoch": 0.7415730337078652, "percentage": 25.0, "elapsed_time": "0:21:45", "remaining_time": "1:05:17"}
+ {"current_steps": 34, "total_steps": 132, "loss": 0.4825, "accuracy": 0.796875, "lr": 2.792331122090709e-06, "epoch": 0.7640449438202247, "percentage": 25.76, "elapsed_time": "0:22:23", "remaining_time": "1:04:33"}
+ {"current_steps": 35, "total_steps": 132, "loss": 0.4821, "accuracy": 0.84375, "lr": 2.7716016418345064e-06, "epoch": 0.7865168539325843, "percentage": 26.52, "elapsed_time": "0:23:01", "remaining_time": "1:03:47"}
+ {"current_steps": 36, "total_steps": 132, "loss": 0.5359, "accuracy": 0.796875, "lr": 2.7499708786237724e-06, "epoch": 0.8089887640449438, "percentage": 27.27, "elapsed_time": "0:23:42", "remaining_time": "1:03:12"}
+ {"current_steps": 37, "total_steps": 132, "loss": 0.5118, "accuracy": 0.859375, "lr": 2.7274541638622533e-06, "epoch": 0.8314606741573034, "percentage": 28.03, "elapsed_time": "0:24:21", "remaining_time": "1:02:33"}
+ {"current_steps": 38, "total_steps": 132, "loss": 0.5253, "accuracy": 0.8125, "lr": 2.7040674568964452e-06, "epoch": 0.8539325842696629, "percentage": 28.79, "elapsed_time": "0:24:59", "remaining_time": "1:01:50"}
+ {"current_steps": 39, "total_steps": 132, "loss": 0.5094, "accuracy": 0.765625, "lr": 2.679827333703964e-06, "epoch": 0.8764044943820225, "percentage": 29.55, "elapsed_time": "0:25:36", "remaining_time": "1:01:03"}
+ {"current_steps": 40, "total_steps": 132, "loss": 0.7054, "accuracy": 0.703125, "lr": 2.6547509751448593e-06, "epoch": 0.898876404494382, "percentage": 30.3, "elapsed_time": "0:26:18", "remaining_time": "1:00:30"}
+ {"current_steps": 41, "total_steps": 132, "loss": 0.6426, "accuracy": 0.78125, "lr": 2.6288561547842076e-06, "epoch": 0.9213483146067416, "percentage": 31.06, "elapsed_time": "0:26:57", "remaining_time": "0:59:49"}
+ {"current_steps": 42, "total_steps": 132, "loss": 0.5032, "accuracy": 0.796875, "lr": 2.602161226294601e-06, "epoch": 0.9438202247191011, "percentage": 31.82, "elapsed_time": "0:27:36", "remaining_time": "0:59:10"}
+ {"current_steps": 43, "total_steps": 132, "loss": 0.4432, "accuracy": 0.828125, "lr": 2.5746851104474728e-06, "epoch": 0.9662921348314607, "percentage": 32.58, "elapsed_time": "0:28:15", "remaining_time": "0:58:30"}
+ {"current_steps": 44, "total_steps": 132, "loss": 0.5194, "accuracy": 0.78125, "lr": 2.5464472817024772e-06, "epoch": 0.9887640449438202, "percentage": 33.33, "elapsed_time": "0:28:51", "remaining_time": "0:57:43"}
+ {"current_steps": 45, "total_steps": 132, "loss": 0.2103, "accuracy": 0.84375, "lr": 2.517467754404424e-06, "epoch": 1.0, "percentage": 34.09, "elapsed_time": "0:29:07", "remaining_time": "0:56:19"}
+ {"current_steps": 46, "total_steps": 132, "loss": 0.0037, "accuracy": 1.0, "lr": 2.487767068597558e-06, "epoch": 1.0224719101123596, "percentage": 34.85, "elapsed_time": "0:29:47", "remaining_time": "0:55:41"}
+ {"current_steps": 47, "total_steps": 132, "loss": 0.0114, "accuracy": 0.984375, "lr": 2.4573662754672303e-06, "epoch": 1.0449438202247192, "percentage": 35.61, "elapsed_time": "0:30:25", "remaining_time": "0:55:01"}
+ {"current_steps": 48, "total_steps": 132, "loss": 0.0154, "accuracy": 0.984375, "lr": 2.426286922419288e-06, "epoch": 1.0674157303370786, "percentage": 36.36, "elapsed_time": "0:31:02", "remaining_time": "0:54:19"}
+ {"current_steps": 49, "total_steps": 132, "loss": 0.0024, "accuracy": 1.0, "lr": 2.3945510378077523e-06, "epoch": 1.0898876404494382, "percentage": 37.12, "elapsed_time": "0:31:40", "remaining_time": "0:53:39"}
+ {"current_steps": 50, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 2.3621811153216106e-06, "epoch": 1.1123595505617978, "percentage": 37.88, "elapsed_time": "0:32:24", "remaining_time": "0:53:08"}
+ {"current_steps": 51, "total_steps": 132, "loss": 0.0116, "accuracy": 0.984375, "lr": 2.32920009804179e-06, "epoch": 1.1348314606741572, "percentage": 38.64, "elapsed_time": "0:33:34", "remaining_time": "0:53:20"}
+ {"current_steps": 52, "total_steps": 132, "loss": 0.0147, "accuracy": 0.984375, "lr": 2.2956313621796135e-06, "epoch": 1.1573033707865168, "percentage": 39.39, "elapsed_time": "0:34:14", "remaining_time": "0:52:41"}
+ {"current_steps": 53, "total_steps": 132, "loss": 0.0007, "accuracy": 1.0, "lr": 2.26149870050826e-06, "epoch": 1.1797752808988764, "percentage": 40.15, "elapsed_time": "0:34:51", "remaining_time": "0:51:56"}
+ {"current_steps": 54, "total_steps": 132, "loss": 0.001, "accuracy": 1.0, "lr": 2.2268263054989753e-06, "epoch": 1.202247191011236, "percentage": 40.91, "elapsed_time": "0:35:27", "remaining_time": "0:51:13"}
+ {"current_steps": 55, "total_steps": 132, "loss": 0.0117, "accuracy": 0.984375, "lr": 2.191638752173989e-06, "epoch": 1.2247191011235956, "percentage": 41.67, "elapsed_time": "0:36:06", "remaining_time": "0:50:32"}
+ {"current_steps": 56, "total_steps": 132, "loss": 0.0003, "accuracy": 1.0, "lr": 2.1559609806882834e-06, "epoch": 1.247191011235955, "percentage": 42.42, "elapsed_time": "0:36:44", "remaining_time": "0:49:51"}
+ {"current_steps": 57, "total_steps": 132, "loss": 0.0112, "accuracy": 0.984375, "lr": 2.1198182786525674e-06, "epoch": 1.2696629213483146, "percentage": 43.18, "elapsed_time": "0:37:22", "remaining_time": "0:49:10"}
+ {"current_steps": 58, "total_steps": 132, "loss": 0.0051, "accuracy": 1.0, "lr": 2.0832362632099813e-06, "epoch": 1.2921348314606742, "percentage": 43.94, "elapsed_time": "0:38:01", "remaining_time": "0:48:30"}
+ {"current_steps": 59, "total_steps": 132, "loss": 0.0001, "accuracy": 1.0, "lr": 2.0462408628792335e-06, "epoch": 1.3146067415730336, "percentage": 44.7, "elapsed_time": "0:38:37", "remaining_time": "0:47:47"}
+ {"current_steps": 60, "total_steps": 132, "loss": 0.0157, "accuracy": 0.984375, "lr": 2.008858299177045e-06, "epoch": 1.3370786516853932, "percentage": 45.45, "elapsed_time": "0:39:17", "remaining_time": "0:47:09"}
+ {"current_steps": 61, "total_steps": 132, "loss": 0.0175, "accuracy": 0.984375, "lr": 1.9711150680329234e-06, "epoch": 1.3595505617977528, "percentage": 46.21, "elapsed_time": "0:39:58", "remaining_time": "0:46:31"}
+ {"current_steps": 62, "total_steps": 132, "loss": 0.0118, "accuracy": 0.984375, "lr": 1.9330379210094315e-06, "epoch": 1.3820224719101124, "percentage": 46.97, "elapsed_time": "0:40:37", "remaining_time": "0:45:51"}
+ {"current_steps": 63, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 1.8946538463412818e-06, "epoch": 1.404494382022472, "percentage": 47.73, "elapsed_time": "0:41:17", "remaining_time": "0:45:13"}
+ {"current_steps": 64, "total_steps": 132, "loss": 0.0092, "accuracy": 1.0, "lr": 1.8559900498066726e-06, "epoch": 1.4269662921348314, "percentage": 48.48, "elapsed_time": "0:41:53", "remaining_time": "0:44:31"}
+ {"current_steps": 65, "total_steps": 132, "loss": 0.0009, "accuracy": 1.0, "lr": 1.8170739354444366e-06, "epoch": 1.449438202247191, "percentage": 49.24, "elapsed_time": "0:42:29", "remaining_time": "0:43:48"}
+ {"current_steps": 66, "total_steps": 132, "loss": 0.0001, "accuracy": 1.0, "lr": 1.7779330861306717e-06, "epoch": 1.4719101123595506, "percentage": 50.0, "elapsed_time": "0:43:10", "remaining_time": "0:43:10"}
+ {"current_steps": 67, "total_steps": 132, "loss": 0.0081, "accuracy": 1.0, "lr": 1.738595244028608e-06, "epoch": 1.49438202247191, "percentage": 50.76, "elapsed_time": "0:43:47", "remaining_time": "0:42:29"}
+ {"current_steps": 68, "total_steps": 132, "loss": 0.0112, "accuracy": 0.984375, "lr": 1.699088290925583e-06, "epoch": 1.5168539325842696, "percentage": 51.52, "elapsed_time": "0:44:31", "remaining_time": "0:41:54"}
+ {"current_steps": 69, "total_steps": 132, "loss": 0.026, "accuracy": 0.984375, "lr": 1.6594402284710481e-06, "epoch": 1.5393258426966292, "percentage": 52.27, "elapsed_time": "0:45:05", "remaining_time": "0:41:10"}
+ {"current_steps": 70, "total_steps": 132, "loss": 0.003, "accuracy": 1.0, "lr": 1.6196791583296247e-06, "epoch": 1.5617977528089888, "percentage": 53.03, "elapsed_time": "0:45:41", "remaining_time": "0:40:28"}
+ {"current_steps": 71, "total_steps": 132, "loss": 0.0118, "accuracy": 0.984375, "lr": 1.579833262263268e-06, "epoch": 1.5842696629213484, "percentage": 53.79, "elapsed_time": "0:46:17", "remaining_time": "0:39:46"}
+ {"current_steps": 72, "total_steps": 132, "loss": 0.0218, "accuracy": 0.984375, "lr": 1.5399307821566623e-06, "epoch": 1.606741573033708, "percentage": 54.55, "elapsed_time": "0:46:57", "remaining_time": "0:39:07"}
+ {"current_steps": 73, "total_steps": 132, "loss": 0.0117, "accuracy": 0.984375, "lr": 1.5e-06, "epoch": 1.6292134831460674, "percentage": 55.3, "elapsed_time": "0:47:37", "remaining_time": "0:38:29"}
+ {"current_steps": 74, "total_steps": 132, "loss": 0.004, "accuracy": 1.0, "lr": 1.460069217843338e-06, "epoch": 1.651685393258427, "percentage": 56.06, "elapsed_time": "0:48:14", "remaining_time": "0:37:48"}
+ {"current_steps": 75, "total_steps": 132, "loss": 0.0112, "accuracy": 0.984375, "lr": 1.4201667377367324e-06, "epoch": 1.6741573033707864, "percentage": 56.82, "elapsed_time": "0:48:50", "remaining_time": "0:37:07"}
+ {"current_steps": 76, "total_steps": 132, "loss": 0.0026, "accuracy": 1.0, "lr": 1.3803208416703752e-06, "epoch": 1.696629213483146, "percentage": 57.58, "elapsed_time": "0:49:23", "remaining_time": "0:36:23"}
+ {"current_steps": 77, "total_steps": 132, "loss": 0.0017, "accuracy": 1.0, "lr": 1.3405597715289522e-06, "epoch": 1.7191011235955056, "percentage": 58.33, "elapsed_time": "0:50:04", "remaining_time": "0:35:45"}
+ {"current_steps": 78, "total_steps": 132, "loss": 0.0111, "accuracy": 1.0, "lr": 1.3009117090744173e-06, "epoch": 1.7415730337078652, "percentage": 59.09, "elapsed_time": "0:50:44", "remaining_time": "0:35:07"}
+ {"current_steps": 79, "total_steps": 132, "loss": 0.0003, "accuracy": 1.0, "lr": 1.2614047559713923e-06, "epoch": 1.7640449438202248, "percentage": 59.85, "elapsed_time": "0:51:21", "remaining_time": "0:34:27"}
+ {"current_steps": 80, "total_steps": 132, "loss": 0.0112, "accuracy": 0.984375, "lr": 1.2220669138693288e-06, "epoch": 1.7865168539325844, "percentage": 60.61, "elapsed_time": "0:52:00", "remaining_time": "0:33:48"}
+ {"current_steps": 81, "total_steps": 132, "loss": 0.0108, "accuracy": 1.0, "lr": 1.1829260645555634e-06, "epoch": 1.8089887640449438, "percentage": 61.36, "elapsed_time": "0:52:40", "remaining_time": "0:33:09"}
+ {"current_steps": 82, "total_steps": 132, "loss": 0.001, "accuracy": 1.0, "lr": 1.1440099501933277e-06, "epoch": 1.8314606741573034, "percentage": 62.12, "elapsed_time": "0:53:27", "remaining_time": "0:32:35"}
+ {"current_steps": 83, "total_steps": 132, "loss": 0.0001, "accuracy": 1.0, "lr": 1.1053461536587183e-06, "epoch": 1.8539325842696628, "percentage": 62.88, "elapsed_time": "0:54:08", "remaining_time": "0:31:57"}
+ {"current_steps": 84, "total_steps": 132, "loss": 0.0007, "accuracy": 1.0, "lr": 1.0669620789905688e-06, "epoch": 1.8764044943820224, "percentage": 63.64, "elapsed_time": "0:54:47", "remaining_time": "0:31:18"}
+ {"current_steps": 85, "total_steps": 132, "loss": 0.0001, "accuracy": 1.0, "lr": 1.0288849319670773e-06, "epoch": 1.898876404494382, "percentage": 64.39, "elapsed_time": "0:55:21", "remaining_time": "0:30:36"}
+ {"current_steps": 86, "total_steps": 132, "loss": 0.0325, "accuracy": 0.953125, "lr": 9.911417008229545e-07, "epoch": 1.9213483146067416, "percentage": 65.15, "elapsed_time": "0:55:59", "remaining_time": "0:29:56"}
+ {"current_steps": 87, "total_steps": 132, "loss": 0.0001, "accuracy": 1.0, "lr": 9.537591371207668e-07, "epoch": 1.9438202247191012, "percentage": 65.91, "elapsed_time": "0:56:32", "remaining_time": "0:29:14"}
+ {"current_steps": 88, "total_steps": 132, "loss": 0.0117, "accuracy": 0.984375, "lr": 9.167637367900192e-07, "epoch": 1.9662921348314608, "percentage": 66.67, "elapsed_time": "0:57:08", "remaining_time": "0:28:34"}
+ {"current_steps": 89, "total_steps": 132, "loss": 0.0002, "accuracy": 1.0, "lr": 8.801817213474331e-07, "epoch": 1.9887640449438202, "percentage": 67.42, "elapsed_time": "0:57:46", "remaining_time": "0:27:54"}
+ {"current_steps": 90, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 8.44039019311717e-07, "epoch": 2.0, "percentage": 68.18, "elapsed_time": "0:58:06", "remaining_time": "0:27:07"}
+ {"current_steps": 91, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 8.08361247826011e-07, "epoch": 2.0224719101123596, "percentage": 68.94, "elapsed_time": "0:58:47", "remaining_time": "0:26:29"}
+ {"current_steps": 92, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 7.731736945010249e-07, "epoch": 2.044943820224719, "percentage": 69.7, "elapsed_time": "0:59:30", "remaining_time": "0:25:52"}
+ {"current_steps": 93, "total_steps": 132, "loss": 0.0109, "accuracy": 0.984375, "lr": 7.385012994917405e-07, "epoch": 2.067415730337079, "percentage": 70.45, "elapsed_time": "1:00:09", "remaining_time": "0:25:13"}
+ {"current_steps": 94, "total_steps": 132, "loss": 0.0109, "accuracy": 0.984375, "lr": 7.043686378203864e-07, "epoch": 2.0898876404494384, "percentage": 71.21, "elapsed_time": "1:00:51", "remaining_time": "0:24:36"}
+ {"current_steps": 95, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 6.707999019582104e-07, "epoch": 2.1123595505617976, "percentage": 71.97, "elapsed_time": "1:01:26", "remaining_time": "0:23:55"}
+ {"current_steps": 96, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 6.378188846783898e-07, "epoch": 2.134831460674157, "percentage": 72.73, "elapsed_time": "1:02:03", "remaining_time": "0:23:16"}
+ {"current_steps": 97, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 6.054489621922477e-07, "epoch": 2.157303370786517, "percentage": 73.48, "elapsed_time": "1:02:40", "remaining_time": "0:22:36"}
+ {"current_steps": 98, "total_steps": 132, "loss": 0.011, "accuracy": 0.984375, "lr": 5.737130775807122e-07, "epoch": 2.1797752808988764, "percentage": 74.24, "elapsed_time": "1:03:22", "remaining_time": "0:21:59"}
+ {"current_steps": 99, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 5.426337245327703e-07, "epoch": 2.202247191011236, "percentage": 75.0, "elapsed_time": "1:04:00", "remaining_time": "0:21:20"}
+ {"current_steps": 100, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 5.122329314024422e-07, "epoch": 2.2247191011235956, "percentage": 75.76, "elapsed_time": "1:04:35", "remaining_time": "0:20:40"}
+ {"current_steps": 101, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 4.825322455955759e-07, "epoch": 2.247191011235955, "percentage": 76.52, "elapsed_time": "1:05:50", "remaining_time": "0:20:12"}
+ {"current_steps": 102, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 4.5355271829752307e-07, "epoch": 2.2696629213483144, "percentage": 77.27, "elapsed_time": "1:06:26", "remaining_time": "0:19:32"}
+ {"current_steps": 103, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 4.2531488955252726e-07, "epoch": 2.292134831460674, "percentage": 78.03, "elapsed_time": "1:07:05", "remaining_time": "0:18:53"}
+ {"current_steps": 104, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 3.978387737053994e-07, "epoch": 2.3146067415730336, "percentage": 78.79, "elapsed_time": "1:07:46", "remaining_time": "0:18:14"}
+ {"current_steps": 105, "total_steps": 132, "loss": 0.0108, "accuracy": 1.0, "lr": 3.7114384521579234e-07, "epoch": 2.337078651685393, "percentage": 79.55, "elapsed_time": "1:08:26", "remaining_time": "0:17:36"}
+ {"current_steps": 106, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 3.4524902485514043e-07, "epoch": 2.359550561797753, "percentage": 80.3, "elapsed_time": "1:09:03", "remaining_time": "0:16:56"}
+ {"current_steps": 107, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 3.201726662960363e-07, "epoch": 2.3820224719101124, "percentage": 81.06, "elapsed_time": "1:09:41", "remaining_time": "0:16:16"}
+ {"current_steps": 108, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 2.9593254310355485e-07, "epoch": 2.404494382022472, "percentage": 81.82, "elapsed_time": "1:10:17", "remaining_time": "0:15:37"}
+ {"current_steps": 109, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 2.725458361377465e-07, "epoch": 2.4269662921348316, "percentage": 82.58, "elapsed_time": "1:10:56", "remaining_time": "0:14:58"}
+ {"current_steps": 110, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 2.5002912137622743e-07, "epoch": 2.449438202247191, "percentage": 83.33, "elapsed_time": "1:11:32", "remaining_time": "0:14:18"}
+ {"current_steps": 111, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 2.2839835816549365e-07, "epoch": 2.4719101123595504, "percentage": 84.09, "elapsed_time": "1:12:10", "remaining_time": "0:13:39"}
+ {"current_steps": 112, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 2.0766887790929072e-07, "epoch": 2.49438202247191, "percentage": 84.85, "elapsed_time": "1:12:48", "remaining_time": "0:13:00"}
+ {"current_steps": 113, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 1.8785537320205808e-07, "epoch": 2.5168539325842696, "percentage": 85.61, "elapsed_time": "1:13:26", "remaining_time": "0:12:20"}
+ {"current_steps": 114, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 1.6897188741514286e-07, "epoch": 2.539325842696629, "percentage": 86.36, "elapsed_time": "1:14:04", "remaining_time": "0:11:41"}
+ {"current_steps": 115, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 1.510318047431713e-07, "epoch": 2.561797752808989, "percentage": 87.12, "elapsed_time": "1:14:41", "remaining_time": "0:11:02"}
+ {"current_steps": 116, "total_steps": 132, "loss": 0.0001, "accuracy": 1.0, "lr": 1.3404784071763015e-07, "epoch": 2.5842696629213484, "percentage": 87.88, "elapsed_time": "1:15:16", "remaining_time": "0:10:22"}
+ {"current_steps": 117, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 1.1803203319438056e-07, "epoch": 2.606741573033708, "percentage": 88.64, "elapsed_time": "1:15:52", "remaining_time": "0:09:43"}
+ {"current_steps": 118, "total_steps": 132, "loss": 0.0219, "accuracy": 0.984375, "lr": 1.0299573382149235e-07, "epoch": 2.629213483146067, "percentage": 89.39, "elapsed_time": "1:16:31", "remaining_time": "0:09:04"}
+ {"current_steps": 119, "total_steps": 132, "loss": 0.0109, "accuracy": 1.0, "lr": 8.894959999345015e-08, "epoch": 2.6516853932584272, "percentage": 90.15, "elapsed_time": "1:17:08", "remaining_time": "0:08:25"}
+ {"current_steps": 120, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 7.590358729742808e-08, "epoch": 2.6741573033707864, "percentage": 90.91, "elapsed_time": "1:17:47", "remaining_time": "0:07:46"}
+ {"current_steps": 121, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 6.386694245699181e-08, "epoch": 2.696629213483146, "percentage": 91.67, "elapsed_time": "1:18:26", "remaining_time": "0:07:07"}
+ {"current_steps": 122, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 5.284819677822611e-08, "epoch": 2.7191011235955056, "percentage": 92.42, "elapsed_time": "1:19:04", "remaining_time": "0:06:28"}
+ {"current_steps": 123, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 4.285516010293522e-08, "epoch": 2.741573033707865, "percentage": 93.18, "elapsed_time": "1:19:42", "remaining_time": "0:05:49"}
+ {"current_steps": 124, "total_steps": 132, "loss": 0.0217, "accuracy": 0.984375, "lr": 3.389491527319999e-08, "epoch": 2.764044943820225, "percentage": 93.94, "elapsed_time": "1:20:20", "remaining_time": "0:05:10"}
+ {"current_steps": 125, "total_steps": 132, "loss": 0.0002, "accuracy": 1.0, "lr": 2.5973813111218548e-08, "epoch": 2.7865168539325844, "percentage": 94.7, "elapsed_time": "1:20:55", "remaining_time": "0:04:31"}
+ {"current_steps": 126, "total_steps": 132, "loss": 0.0217, "accuracy": 0.984375, "lr": 1.909746791798317e-08, "epoch": 2.808988764044944, "percentage": 95.45, "elapsed_time": "1:21:33", "remaining_time": "0:03:53"}
+ {"current_steps": 127, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 1.3270753493989374e-08, "epoch": 2.831460674157303, "percentage": 96.21, "elapsed_time": "1:22:13", "remaining_time": "0:03:14"}
+ {"current_steps": 128, "total_steps": 132, "loss": 0.0108, "accuracy": 1.0, "lr": 8.49779968479436e-09, "epoch": 2.853932584269663, "percentage": 96.97, "elapsed_time": "1:22:50", "remaining_time": "0:02:35"}
+ {"current_steps": 129, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 4.781989453874814e-09, "epoch": 2.8764044943820224, "percentage": 97.73, "elapsed_time": "1:23:28", "remaining_time": "0:01:56"}
+ {"current_steps": 130, "total_steps": 132, "loss": 0.0, "accuracy": 1.0, "lr": 2.1259564848570834e-09, "epoch": 2.898876404494382, "percentage": 98.48, "elapsed_time": "1:24:08", "remaining_time": "0:01:17"}
+ {"current_steps": 131, "total_steps": 132, "loss": 0.0108, "accuracy": 0.984375, "lr": 5.315833148210603e-10, "epoch": 2.9213483146067416, "percentage": 99.24, "elapsed_time": "1:24:51", "remaining_time": "0:00:38"}
+ {"current_steps": 132, "total_steps": 132, "loss": 0.0217, "accuracy": 0.984375, "lr": 0.0, "epoch": 2.943820224719101, "percentage": 100.0, "elapsed_time": "1:25:33", "remaining_time": "0:00:00"}
+ {"current_steps": 132, "total_steps": 132, "epoch": 2.943820224719101, "percentage": 100.0, "elapsed_time": "1:26:04", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,2022 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.943820224719101,
+ "eval_steps": 500,
+ "global_step": 132,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02247191011235955,
+ "grad_norm": 489.5653076171875,
+ "learning_rate": 2.1428571428571428e-07,
+ "logits/chosen": 1.4551408290863037,
+ "logits/rejected": 1.478129267692566,
+ "logps/chosen": -2968.771240234375,
+ "logps/rejected": -3035.35302734375,
+ "loss": 0.6931,
+ "rewards/accuracies": 0.0,
+ "rewards/chosen": 0.0,
+ "rewards/margins": 0.0,
+ "rewards/rejected": 0.0,
+ "step": 1
+ },
+ {
+ "epoch": 0.0449438202247191,
+ "grad_norm": 419.54876708984375,
+ "learning_rate": 4.2857142857142857e-07,
+ "logits/chosen": 1.5314003229141235,
+ "logits/rejected": 1.4525893926620483,
+ "logps/chosen": -3010.43994140625,
+ "logps/rejected": -2926.948974609375,
+ "loss": 0.6931,
+ "rewards/accuracies": 0.0,
+ "rewards/chosen": 0.0,
+ "rewards/margins": 0.0,
+ "rewards/rejected": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.06741573033707865,
+ "grad_norm": 789.9224243164062,
+ "learning_rate": 6.428571428571428e-07,
+ "logits/chosen": 1.482939600944519,
+ "logits/rejected": 1.5616533756256104,
+ "logps/chosen": -2998.501708984375,
+ "logps/rejected": -3179.81982421875,
+ "loss": 0.9204,
+ "rewards/accuracies": 0.46875,
+ "rewards/chosen": -0.08596238493919373,
+ "rewards/margins": -0.19251862168312073,
+ "rewards/rejected": 0.10655620694160461,
+ "step": 3
+ },
+ {
+ "epoch": 0.0898876404494382,
+ "grad_norm": 378.14190673828125,
+ "learning_rate": 8.571428571428571e-07,
+ "logits/chosen": 1.6036081314086914,
+ "logits/rejected": 1.7028334140777588,
+ "logps/chosen": -2979.7060546875,
+ "logps/rejected": -2913.69091796875,
+ "loss": 0.6588,
+ "rewards/accuracies": 0.546875,
+ "rewards/chosen": -0.28274667263031006,
+ "rewards/margins": 0.22482016682624817,
+ "rewards/rejected": -0.5075668692588806,
+ "step": 4
+ },
+ {
+ "epoch": 0.11235955056179775,
+ "grad_norm": 307.0648193359375,
+ "learning_rate": 1.0714285714285716e-06,
+ "logits/chosen": 1.3923085927963257,
+ "logits/rejected": 1.4200749397277832,
+ "logps/chosen": -3015.828125,
+ "logps/rejected": -3068.435302734375,
+ "loss": 0.5615,
+ "rewards/accuracies": 0.6875,
+ "rewards/chosen": -0.03155745938420296,
+ "rewards/margins": 0.5133614540100098,
+ "rewards/rejected": -0.5449188947677612,
+ "step": 5
+ },
+ {
+ "epoch": 0.1348314606741573,
+ "grad_norm": 282.67034912109375,
+ "learning_rate": 1.2857142857142856e-06,
+ "logits/chosen": 1.5581945180892944,
+ "logits/rejected": 1.405899167060852,
+ "logps/chosen": -3204.767333984375,
+ "logps/rejected": -3163.357177734375,
+ "loss": 0.5469,
+ "rewards/accuracies": 0.71875,
+ "rewards/chosen": -0.25397348403930664,
+ "rewards/margins": 0.6482839584350586,
+ "rewards/rejected": -0.9022574424743652,
+ "step": 6
+ },
+ {
+ "epoch": 0.15730337078651685,
+ "grad_norm": 218.5866241455078,
+ "learning_rate": 1.5e-06,
+ "logits/chosen": 1.496790885925293,
+ "logits/rejected": 1.4303985834121704,
+ "logps/chosen": -3185.8203125,
+ "logps/rejected": -3225.123046875,
+ "loss": 0.4709,
+ "rewards/accuracies": 0.734375,
+ "rewards/chosen": -0.04301854223012924,
+ "rewards/margins": 1.6269282102584839,
+ "rewards/rejected": -1.6699466705322266,
+ "step": 7
+ },
+ {
+ "epoch": 0.1797752808988764,
+ "grad_norm": 181.489501953125,
+ "learning_rate": 1.7142857142857143e-06,
+ "logits/chosen": 1.6130130290985107,
+ "logits/rejected": 1.5007115602493286,
+ "logps/chosen": -3087.791748046875,
+ "logps/rejected": -2948.8115234375,
+ "loss": 0.396,
+ "rewards/accuracies": 0.75,
+ "rewards/chosen": -0.08753497898578644,
+ "rewards/margins": 2.817833185195923,
+ "rewards/rejected": -2.9053683280944824,
+ "step": 8
+ },
+ {
+ "epoch": 0.20224719101123595,
+ "grad_norm": 188.34768676757812,
+ "learning_rate": 1.928571428571429e-06,
+ "logits/chosen": 1.5587732791900635,
+ "logits/rejected": 1.6744489669799805,
+ "logps/chosen": -2374.6494140625,
+ "logps/rejected": -2492.75537109375,
+ "loss": 0.448,
+ "rewards/accuracies": 0.71875,
+ "rewards/chosen": -0.14219728112220764,
+ "rewards/margins": 2.7199909687042236,
+ "rewards/rejected": -2.8621885776519775,
+ "step": 9
+ },
+ {
+ "epoch": 0.2247191011235955,
+ "grad_norm": 167.6234588623047,
+ "learning_rate": 2.142857142857143e-06,
+ "logits/chosen": 1.581652283668518,
+ "logits/rejected": 1.5243756771087646,
+ "logps/chosen": -2837.341552734375,
+ "logps/rejected": -2842.2666015625,
+ "loss": 0.3618,
+ "rewards/accuracies": 0.765625,
+ "rewards/chosen": -0.06367100775241852,
+ "rewards/margins": 6.429449081420898,
+ "rewards/rejected": -6.493120193481445,
+ "step": 10
+ },
+ {
+ "epoch": 0.24719101123595505,
+ "grad_norm": 195.05810546875,
+ "learning_rate": 2.357142857142857e-06,
+ "logits/chosen": 1.531968355178833,
+ "logits/rejected": 1.5490195751190186,
+ "logps/chosen": -2785.763427734375,
+ "logps/rejected": -2938.71533203125,
+ "loss": 0.3962,
+ "rewards/accuracies": 0.703125,
+ "rewards/chosen": -0.2717077136039734,
+ "rewards/margins": 8.072213172912598,
+ "rewards/rejected": -8.343921661376953,
+ "step": 11
+ },
+ {
+ "epoch": 0.2696629213483146,
+ "grad_norm": 204.53872680664062,
+ "learning_rate": 2.571428571428571e-06,
+ "logits/chosen": 1.5632414817810059,
+ "logits/rejected": 1.5352647304534912,
+ "logps/chosen": -2883.001220703125,
+ "logps/rejected": -3065.4296875,
+ "loss": 0.4155,
+ "rewards/accuracies": 0.6875,
+ "rewards/chosen": 0.09219703823328018,
+ "rewards/margins": 11.51332950592041,
+ "rewards/rejected": -11.421133041381836,
+ "step": 12
+ },
+ {
+ "epoch": 0.29213483146067415,
+ "grad_norm": 181.2421112060547,
+ "learning_rate": 2.785714285714286e-06,
+ "logits/chosen": 1.5124785900115967,
+ "logits/rejected": 1.4263392686843872,
+ "logps/chosen": -3015.5341796875,
+ "logps/rejected": -3136.56982421875,
+ "loss": 0.3343,
+ "rewards/accuracies": 0.75,
+ "rewards/chosen": -0.1826700121164322,
+ "rewards/margins": 16.418424606323242,
+ "rewards/rejected": -16.601093292236328,
+ "step": 13
+ },
+ {
+ "epoch": 0.3146067415730337,
208
+ "grad_norm": 178.02650451660156,
209
+ "learning_rate": 3e-06,
210
+ "logits/chosen": 1.4881091117858887,
211
+ "logits/rejected": 1.4641259908676147,
212
+ "logps/chosen": -2906.181396484375,
213
+ "logps/rejected": -3083.74755859375,
214
+ "loss": 0.3189,
215
+ "rewards/accuracies": 0.890625,
216
+ "rewards/chosen": -0.07007797807455063,
217
+ "rewards/margins": 18.051210403442383,
218
+ "rewards/rejected": -18.121288299560547,
219
+ "step": 14
220
+ },
221
+ {
222
+ "epoch": 0.33707865168539325,
223
+ "grad_norm": 188.4379425048828,
224
+ "learning_rate": 2.999468416685179e-06,
225
+ "logits/chosen": 1.4958661794662476,
226
+ "logits/rejected": 1.5740702152252197,
227
+ "logps/chosen": -2589.415771484375,
228
+ "logps/rejected": -2884.312744140625,
229
+ "loss": 0.3903,
230
+ "rewards/accuracies": 0.765625,
231
+ "rewards/chosen": -0.1765696406364441,
232
+ "rewards/margins": 17.232072830200195,
233
+ "rewards/rejected": -17.408641815185547,
234
+ "step": 15
235
+ },
236
+ {
237
+ "epoch": 0.3595505617977528,
238
+ "grad_norm": 161.3037872314453,
239
+ "learning_rate": 2.9978740435151427e-06,
240
+ "logits/chosen": 1.5349267721176147,
241
+ "logits/rejected": 1.491062045097351,
242
+ "logps/chosen": -2951.84619140625,
243
+ "logps/rejected": -3206.8662109375,
244
+ "loss": 0.3059,
245
+ "rewards/accuracies": 0.78125,
246
+ "rewards/chosen": -1.7078287601470947,
247
+ "rewards/margins": 23.868520736694336,
248
+ "rewards/rejected": -25.57634925842285,
249
+ "step": 16
250
+ },
251
+ {
252
+ "epoch": 0.38202247191011235,
253
+ "grad_norm": 186.13180541992188,
254
+ "learning_rate": 2.995218010546125e-06,
255
+ "logits/chosen": 1.4998528957366943,
256
+ "logits/rejected": 1.4576878547668457,
257
+ "logps/chosen": -3011.727783203125,
258
+ "logps/rejected": -3261.4501953125,
259
+ "loss": 0.3808,
260
+ "rewards/accuracies": 0.75,
261
+ "rewards/chosen": -0.25169306993484497,
262
+ "rewards/margins": 35.25308609008789,
263
+ "rewards/rejected": -35.50477600097656,
264
+ "step": 17
265
+ },
266
+ {
267
+ "epoch": 0.4044943820224719,
268
+ "grad_norm": 185.6712188720703,
269
+ "learning_rate": 2.9915022003152055e-06,
270
+ "logits/chosen": 1.6139241456985474,
271
+ "logits/rejected": 1.5550901889801025,
272
+ "logps/chosen": -2965.4423828125,
273
+ "logps/rejected": -3224.514404296875,
274
+ "loss": 0.3542,
275
+ "rewards/accuracies": 0.8125,
276
+ "rewards/chosen": 1.8823347091674805,
277
+ "rewards/margins": 39.0025634765625,
278
+ "rewards/rejected": -37.12023162841797,
279
+ "step": 18
280
+ },
281
+ {
282
+ "epoch": 0.42696629213483145,
283
+ "grad_norm": 182.43603515625,
284
+ "learning_rate": 2.986729246506011e-06,
285
+ "logits/chosen": 1.244603157043457,
286
+ "logits/rejected": 1.2053301334381104,
287
+ "logps/chosen": -2764.19189453125,
288
+ "logps/rejected": -3084.441650390625,
289
+ "loss": 0.367,
290
+ "rewards/accuracies": 0.765625,
291
+ "rewards/chosen": -1.6243125200271606,
292
+ "rewards/margins": 43.56684112548828,
293
+ "rewards/rejected": -45.1911506652832,
294
+ "step": 19
295
+ },
296
+ {
297
+ "epoch": 0.449438202247191,
298
+ "grad_norm": 198.76722717285156,
299
+ "learning_rate": 2.980902532082017e-06,
300
+ "logits/chosen": 1.4910385608673096,
301
+ "logits/rejected": 1.4667646884918213,
302
+ "logps/chosen": -2632.417724609375,
303
+ "logps/rejected": -2912.476806640625,
304
+ "loss": 0.4946,
305
+ "rewards/accuracies": 0.75,
306
+ "rewards/chosen": -2.317056179046631,
307
+ "rewards/margins": 34.359012603759766,
308
+ "rewards/rejected": -36.676063537597656,
309
+ "step": 20
310
+ },
311
+ {
312
+ "epoch": 0.47191011235955055,
313
+ "grad_norm": 203.78700256347656,
314
+ "learning_rate": 2.9740261868887817e-06,
315
+ "logits/chosen": 1.4394636154174805,
316
+ "logits/rejected": 1.3155745267868042,
317
+ "logps/chosen": -2808.47509765625,
318
+ "logps/rejected": -3043.707763671875,
319
+ "loss": 0.4802,
320
+ "rewards/accuracies": 0.71875,
321
+ "rewards/chosen": 1.6056139469146729,
322
+ "rewards/margins": 43.16130065917969,
323
+ "rewards/rejected": -41.555686950683594,
324
+ "step": 21
325
+ },
326
+ {
327
+ "epoch": 0.4943820224719101,
328
+ "grad_norm": 199.40330505371094,
329
+ "learning_rate": 2.9661050847268e-06,
330
+ "logits/chosen": 1.3054568767547607,
331
+ "logits/rejected": 1.2870110273361206,
332
+ "logps/chosen": -2704.07568359375,
333
+ "logps/rejected": -3091.42626953125,
334
+ "loss": 0.4924,
335
+ "rewards/accuracies": 0.828125,
336
+ "rewards/chosen": -4.835676670074463,
337
+ "rewards/margins": 40.92457580566406,
338
+ "rewards/rejected": -45.76025390625,
339
+ "step": 22
340
+ },
341
+ {
342
+ "epoch": 0.5168539325842697,
343
+ "grad_norm": 184.34901428222656,
344
+ "learning_rate": 2.957144839897065e-06,
345
+ "logits/chosen": 1.5794934034347534,
346
+ "logits/rejected": 1.374954104423523,
347
+ "logps/chosen": -2828.36083984375,
348
+ "logps/rejected": -3111.46875,
349
+ "loss": 0.4932,
350
+ "rewards/accuracies": 0.734375,
351
+ "rewards/chosen": 3.432398796081543,
352
+ "rewards/margins": 62.3823356628418,
353
+ "rewards/rejected": -58.9499397277832,
354
+ "step": 23
355
+ },
356
+ {
357
+ "epoch": 0.5393258426966292,
358
+ "grad_norm": 198.54269409179688,
359
+ "learning_rate": 2.947151803221774e-06,
360
+ "logits/chosen": 1.6772565841674805,
361
+ "logits/rejected": 1.6362934112548828,
362
+ "logps/chosen": -2880.4677734375,
363
+ "logps/rejected": -3303.3857421875,
364
+ "loss": 0.3869,
365
+ "rewards/accuracies": 0.796875,
366
+ "rewards/chosen": 0.12497274577617645,
367
+ "rewards/margins": 53.7283821105957,
368
+ "rewards/rejected": -53.60340881347656,
369
+ "step": 24
370
+ },
371
+ {
372
+ "epoch": 0.5617977528089888,
373
+ "grad_norm": 173.3833465576172,
374
+ "learning_rate": 2.936133057543008e-06,
375
+ "logits/chosen": 1.4493129253387451,
376
+ "logits/rejected": 1.3350006341934204,
377
+ "logps/chosen": -2721.460693359375,
378
+ "logps/rejected": -3138.864990234375,
379
+ "loss": 0.3981,
380
+ "rewards/accuracies": 0.78125,
381
+ "rewards/chosen": 2.794492244720459,
382
+ "rewards/margins": 69.71061706542969,
383
+ "rewards/rejected": -66.91613006591797,
384
+ "step": 25
385
+ },
386
+ {
387
+ "epoch": 0.5842696629213483,
388
+ "grad_norm": 232.13525390625,
389
+ "learning_rate": 2.924096412702572e-06,
390
+ "logits/chosen": 1.7099878787994385,
391
+ "logits/rejected": 1.5226480960845947,
392
+ "logps/chosen": -2983.288330078125,
393
+ "logps/rejected": -3093.673095703125,
394
+ "loss": 0.613,
395
+ "rewards/accuracies": 0.703125,
396
+ "rewards/chosen": 2.1761527061462402,
397
+ "rewards/margins": 59.57087326049805,
398
+ "rewards/rejected": -57.394718170166016,
399
+ "step": 26
400
+ },
401
+ {
402
+ "epoch": 0.6067415730337079,
403
+ "grad_norm": 162.77978515625,
404
+ "learning_rate": 2.91105040000655e-06,
405
+ "logits/chosen": 1.4071202278137207,
406
+ "logits/rejected": 1.4425785541534424,
407
+ "logps/chosen": -2522.546630859375,
408
+ "logps/rejected": -3321.0537109375,
409
+ "loss": 0.4005,
410
+ "rewards/accuracies": 0.859375,
411
+ "rewards/chosen": 1.8253318071365356,
412
+ "rewards/margins": 63.75608825683594,
413
+ "rewards/rejected": -61.930755615234375,
414
+ "step": 27
415
+ },
416
+ {
417
+ "epoch": 0.6292134831460674,
418
+ "grad_norm": 207.4031219482422,
419
+ "learning_rate": 2.897004266178508e-06,
420
+ "logits/chosen": 1.5841655731201172,
421
+ "logits/rejected": 1.4097201824188232,
422
+ "logps/chosen": -3239.787841796875,
423
+ "logps/rejected": -3663.88232421875,
424
+ "loss": 0.522,
425
+ "rewards/accuracies": 0.765625,
426
+ "rewards/chosen": -0.2217176854610443,
427
+ "rewards/margins": 58.664180755615234,
428
+ "rewards/rejected": -58.88589859008789,
429
+ "step": 28
430
+ },
431
+ {
432
+ "epoch": 0.651685393258427,
433
+ "grad_norm": 172.96218872070312,
434
+ "learning_rate": 2.8819679668056195e-06,
435
+ "logits/chosen": 1.6320128440856934,
436
+ "logits/rejected": 1.5467625856399536,
437
+ "logps/chosen": -2654.78271484375,
438
+ "logps/rejected": -3225.193359375,
439
+ "loss": 0.3816,
440
+ "rewards/accuracies": 0.765625,
441
+ "rewards/chosen": 2.769482374191284,
442
+ "rewards/margins": 65.22299194335938,
443
+ "rewards/rejected": -62.453514099121094,
444
+ "step": 29
445
+ },
446
+ {
447
+ "epoch": 0.6741573033707865,
448
+ "grad_norm": 200.36915588378906,
449
+ "learning_rate": 2.8659521592823702e-06,
450
+ "logits/chosen": 1.6264617443084717,
451
+ "logits/rejected": 1.421095848083496,
452
+ "logps/chosen": -2914.17529296875,
453
+ "logps/rejected": -3396.08544921875,
454
+ "loss": 0.4913,
455
+ "rewards/accuracies": 0.765625,
456
+ "rewards/chosen": 7.334710121154785,
457
+ "rewards/margins": 89.93038177490234,
458
+ "rewards/rejected": -82.59567260742188,
459
+ "step": 30
460
+ },
461
+ {
462
+ "epoch": 0.6966292134831461,
463
+ "grad_norm": 250.5316162109375,
464
+ "learning_rate": 2.848968195256829e-06,
465
+ "logits/chosen": 1.6201553344726562,
466
+ "logits/rejected": 1.4870961904525757,
467
+ "logps/chosen": -3036.192138671875,
468
+ "logps/rejected": -3605.6904296875,
469
+ "loss": 0.708,
470
+ "rewards/accuracies": 0.65625,
471
+ "rewards/chosen": 4.598369121551514,
472
+ "rewards/margins": 79.35784149169922,
473
+ "rewards/rejected": -74.75946807861328,
474
+ "step": 31
475
+ },
476
+ {
477
+ "epoch": 0.7191011235955056,
478
+ "grad_norm": 228.1786346435547,
479
+ "learning_rate": 2.831028112584857e-06,
480
+ "logits/chosen": 1.3086817264556885,
481
+ "logits/rejected": 1.2920796871185303,
482
+ "logps/chosen": -2828.72900390625,
483
+ "logps/rejected": -3492.97802734375,
484
+ "loss": 0.5514,
485
+ "rewards/accuracies": 0.6875,
486
+ "rewards/chosen": 0.8046822547912598,
487
+ "rewards/margins": 77.88575744628906,
488
+ "rewards/rejected": -77.08108520507812,
489
+ "step": 32
490
+ },
491
+ {
492
+ "epoch": 0.7415730337078652,
493
+ "grad_norm": 156.25662231445312,
494
+ "learning_rate": 2.812144626797942e-06,
495
+ "logits/chosen": 1.3912537097930908,
496
+ "logits/rejected": 1.1646690368652344,
497
+ "logps/chosen": -3173.48388671875,
498
+ "logps/rejected": -3708.0390625,
499
+ "loss": 0.4043,
500
+ "rewards/accuracies": 0.875,
501
+ "rewards/chosen": 2.820896863937378,
502
+ "rewards/margins": 82.55420684814453,
503
+ "rewards/rejected": -79.73331451416016,
504
+ "step": 33
505
+ },
506
+ {
507
+ "epoch": 0.7640449438202247,
508
+ "grad_norm": 189.89682006835938,
509
+ "learning_rate": 2.792331122090709e-06,
510
+ "logits/chosen": 1.525010108947754,
511
+ "logits/rejected": 1.4141947031021118,
512
+ "logps/chosen": -2818.591064453125,
513
+ "logps/rejected": -3415.1484375,
514
+ "loss": 0.4825,
515
+ "rewards/accuracies": 0.796875,
516
+ "rewards/chosen": 1.3273561000823975,
517
+ "rewards/margins": 81.49795532226562,
518
+ "rewards/rejected": -80.17059326171875,
519
+ "step": 34
520
+ },
521
+ {
522
+ "epoch": 0.7865168539325843,
523
+ "grad_norm": 198.3324432373047,
524
+ "learning_rate": 2.7716016418345064e-06,
525
+ "logits/chosen": 1.5669187307357788,
526
+ "logits/rejected": 1.3444348573684692,
527
+ "logps/chosen": -2831.2744140625,
528
+ "logps/rejected": -3359.554931640625,
529
+ "loss": 0.4821,
530
+ "rewards/accuracies": 0.84375,
531
+ "rewards/chosen": 4.969450950622559,
532
+ "rewards/margins": 95.5076675415039,
533
+ "rewards/rejected": -90.53821563720703,
534
+ "step": 35
535
+ },
536
+ {
537
+ "epoch": 0.8089887640449438,
538
+ "grad_norm": 202.50929260253906,
539
+ "learning_rate": 2.7499708786237724e-06,
540
+ "logits/chosen": 1.6073535680770874,
541
+ "logits/rejected": 1.5690536499023438,
542
+ "logps/chosen": -2898.311279296875,
543
+ "logps/rejected": -3199.489013671875,
544
+ "loss": 0.5359,
545
+ "rewards/accuracies": 0.796875,
546
+ "rewards/chosen": -3.0962305068969727,
547
+ "rewards/margins": 49.8695182800293,
548
+ "rewards/rejected": -52.96574783325195,
549
+ "step": 36
550
+ },
551
+ {
552
+ "epoch": 0.8314606741573034,
553
+ "grad_norm": 172.3883056640625,
554
+ "learning_rate": 2.7274541638622533e-06,
555
+ "logits/chosen": 1.5025634765625,
556
+ "logits/rejected": 1.2939093112945557,
557
+ "logps/chosen": -2682.772705078125,
558
+ "logps/rejected": -3070.16259765625,
559
+ "loss": 0.5118,
560
+ "rewards/accuracies": 0.859375,
561
+ "rewards/chosen": -0.5182172060012817,
562
+ "rewards/margins": 86.14014434814453,
563
+ "rewards/rejected": -86.65835571289062,
564
+ "step": 37
565
+ },
566
+ {
567
+ "epoch": 0.8539325842696629,
568
+ "grad_norm": 200.7554473876953,
569
+ "learning_rate": 2.7040674568964452e-06,
570
+ "logits/chosen": 1.4808025360107422,
571
+ "logits/rejected": 1.3251252174377441,
572
+ "logps/chosen": -2854.599365234375,
573
+ "logps/rejected": -3208.1640625,
574
+ "loss": 0.5253,
575
+ "rewards/accuracies": 0.8125,
576
+ "rewards/chosen": 1.5150139331817627,
577
+ "rewards/margins": 78.78499603271484,
578
+ "rewards/rejected": -77.26997375488281,
579
+ "step": 38
580
+ },
581
+ {
582
+ "epoch": 0.8764044943820225,
583
+ "grad_norm": 217.05526733398438,
584
+ "learning_rate": 2.679827333703964e-06,
585
+ "logits/chosen": 1.5550140142440796,
586
+ "logits/rejected": 1.5405230522155762,
587
+ "logps/chosen": -2775.199951171875,
588
+ "logps/rejected": -3292.66650390625,
589
+ "loss": 0.5094,
590
+ "rewards/accuracies": 0.765625,
591
+ "rewards/chosen": -0.5831690430641174,
592
+ "rewards/margins": 75.25239562988281,
593
+ "rewards/rejected": -75.8355712890625,
594
+ "step": 39
595
+ },
596
+ {
597
+ "epoch": 0.898876404494382,
598
+ "grad_norm": 260.61224365234375,
599
+ "learning_rate": 2.6547509751448593e-06,
600
+ "logits/chosen": 1.5327131748199463,
601
+ "logits/rejected": 1.404789924621582,
602
+ "logps/chosen": -2995.2666015625,
603
+ "logps/rejected": -3701.7333984375,
604
+ "loss": 0.7054,
605
+ "rewards/accuracies": 0.703125,
606
+ "rewards/chosen": 4.574828147888184,
607
+ "rewards/margins": 96.09221649169922,
608
+ "rewards/rejected": -91.51737976074219,
609
+ "step": 40
610
+ },
611
+ {
612
+ "epoch": 0.9213483146067416,
613
+ "grad_norm": 210.46607971191406,
614
+ "learning_rate": 2.6288561547842076e-06,
615
+ "logits/chosen": 1.5143060684204102,
616
+ "logits/rejected": 1.2557826042175293,
617
+ "logps/chosen": -2932.751953125,
618
+ "logps/rejected": -3389.65185546875,
619
+ "loss": 0.6426,
620
+ "rewards/accuracies": 0.78125,
621
+ "rewards/chosen": 3.5902769565582275,
622
+ "rewards/margins": 102.1531982421875,
623
+ "rewards/rejected": -98.56291198730469,
624
+ "step": 41
625
+ },
626
+ {
627
+ "epoch": 0.9438202247191011,
628
+ "grad_norm": 203.90863037109375,
629
+ "learning_rate": 2.602161226294601e-06,
630
+ "logits/chosen": 1.4669859409332275,
631
+ "logits/rejected": 1.254248023033142,
632
+ "logps/chosen": -3275.650146484375,
633
+ "logps/rejected": -3885.744873046875,
634
+ "loss": 0.5032,
635
+ "rewards/accuracies": 0.796875,
636
+ "rewards/chosen": -7.145351886749268,
637
+ "rewards/margins": 94.66647338867188,
638
+ "rewards/rejected": -101.81182861328125,
639
+ "step": 42
640
+ },
641
+ {
642
+ "epoch": 0.9662921348314607,
643
+ "grad_norm": 190.71495056152344,
644
+ "learning_rate": 2.5746851104474728e-06,
645
+ "logits/chosen": 1.4877179861068726,
646
+ "logits/rejected": 1.3816105127334595,
647
+ "logps/chosen": -2700.980224609375,
648
+ "logps/rejected": -3283.328125,
649
+ "loss": 0.4432,
650
+ "rewards/accuracies": 0.828125,
651
+ "rewards/chosen": 1.710632085800171,
652
+ "rewards/margins": 75.0985107421875,
653
+ "rewards/rejected": -73.38786315917969,
654
+ "step": 43
655
+ },
656
+ {
657
+ "epoch": 0.9887640449438202,
658
+ "grad_norm": 192.31964111328125,
659
+ "learning_rate": 2.5464472817024772e-06,
660
+ "logits/chosen": 1.3617230653762817,
661
+ "logits/rejected": 1.2478257417678833,
662
+ "logps/chosen": -2841.803466796875,
663
+ "logps/rejected": -3503.9794921875,
664
+ "loss": 0.5194,
665
+ "rewards/accuracies": 0.78125,
666
+ "rewards/chosen": 4.092733383178711,
667
+ "rewards/margins": 110.31430053710938,
668
+ "rewards/rejected": -106.22156524658203,
669
+ "step": 44
670
+ },
671
+ {
672
+ "epoch": 1.0,
673
+ "grad_norm": 192.31964111328125,
674
+ "learning_rate": 2.517467754404424e-06,
675
+ "logits/chosen": 1.3865031003952026,
676
+ "logits/rejected": 1.2281872034072876,
677
+ "logps/chosen": -2563.0751953125,
678
+ "logps/rejected": -2940.1357421875,
679
+ "loss": 0.2103,
680
+ "rewards/accuracies": 0.84375,
681
+ "rewards/chosen": 4.377815246582031,
682
+ "rewards/margins": 81.93372344970703,
683
+ "rewards/rejected": -77.555908203125,
684
+ "step": 45
685
+ },
686
+ {
687
+ "epoch": 1.0224719101123596,
688
+ "grad_norm": 135.86026000976562,
689
+ "learning_rate": 2.487767068597558e-06,
690
+ "logits/chosen": 1.5341211557388306,
691
+ "logits/rejected": 1.4015753269195557,
692
+ "logps/chosen": -3250.149658203125,
693
+ "logps/rejected": -3893.629150390625,
694
+ "loss": 0.0037,
695
+ "rewards/accuracies": 1.0,
696
+ "rewards/chosen": 19.023714065551758,
697
+ "rewards/margins": 134.42942810058594,
698
+ "rewards/rejected": -115.40570068359375,
699
+ "step": 46
700
+ },
701
+ {
702
+ "epoch": 1.0449438202247192,
703
+ "grad_norm": 1.9560177326202393,
704
+ "learning_rate": 2.4573662754672303e-06,
705
+ "logits/chosen": 1.4638060331344604,
706
+ "logits/rejected": 1.396654486656189,
707
+ "logps/chosen": -2667.339599609375,
708
+ "logps/rejected": -3516.595703125,
709
+ "loss": 0.0114,
710
+ "rewards/accuracies": 0.984375,
711
+ "rewards/chosen": 8.45435905456543,
712
+ "rewards/margins": 107.95783996582031,
713
+ "rewards/rejected": -99.50347900390625,
714
+ "step": 47
715
+ },
716
+ {
717
+ "epoch": 1.0674157303370786,
718
+ "grad_norm": 14.909017562866211,
719
+ "learning_rate": 2.426286922419288e-06,
720
+ "logits/chosen": 1.6447203159332275,
721
+ "logits/rejected": 1.6282371282577515,
722
+ "logps/chosen": -2377.240478515625,
723
+ "logps/rejected": -2950.48583984375,
724
+ "loss": 0.0154,
725
+ "rewards/accuracies": 0.984375,
726
+ "rewards/chosen": 7.06836462020874,
727
+ "rewards/margins": 84.36599731445312,
728
+ "rewards/rejected": -77.29763793945312,
729
+ "step": 48
730
+ },
731
+ {
732
+ "epoch": 1.0898876404494382,
733
+ "grad_norm": 4.328535556793213,
734
+ "learning_rate": 2.3945510378077523e-06,
735
+ "logits/chosen": 1.3356518745422363,
736
+ "logits/rejected": 1.2965461015701294,
737
+ "logps/chosen": -2788.0400390625,
738
+ "logps/rejected": -3457.5185546875,
739
+ "loss": 0.0024,
740
+ "rewards/accuracies": 1.0,
741
+ "rewards/chosen": 11.870361328125,
742
+ "rewards/margins": 103.6649169921875,
743
+ "rewards/rejected": -91.79456329345703,
744
+ "step": 49
745
+ },
746
+ {
747
+ "epoch": 1.1123595505617978,
748
+ "grad_norm": 6.1306352615356445,
749
+ "learning_rate": 2.3621811153216106e-06,
750
+ "logits/chosen": 1.3586758375167847,
751
+ "logits/rejected": 1.2172551155090332,
752
+ "logps/chosen": -3142.0791015625,
753
+ "logps/rejected": -3848.3056640625,
754
+ "loss": 0.0108,
755
+ "rewards/accuracies": 0.984375,
756
+ "rewards/chosen": 5.018255710601807,
757
+ "rewards/margins": 121.07866668701172,
758
+ "rewards/rejected": -116.06040954589844,
759
+ "step": 50
760
+ },
761
+ {
762
+ "epoch": 1.1348314606741572,
763
+ "grad_norm": 2.2042205333709717,
764
+ "learning_rate": 2.32920009804179e-06,
765
+ "logits/chosen": 1.676792860031128,
766
+ "logits/rejected": 1.4110440015792847,
767
+ "logps/chosen": -2846.33056640625,
768
+ "logps/rejected": -3573.93359375,
769
+ "loss": 0.0116,
770
+ "rewards/accuracies": 0.984375,
771
+ "rewards/chosen": 16.190317153930664,
772
+ "rewards/margins": 119.14263153076172,
773
+ "rewards/rejected": -102.95230102539062,
774
+ "step": 51
775
+ },
776
+ {
777
+ "epoch": 1.1573033707865168,
778
+ "grad_norm": 13.62660026550293,
779
+ "learning_rate": 2.2956313621796135e-06,
780
+ "logits/chosen": 1.5751538276672363,
781
+ "logits/rejected": 1.4073097705841064,
782
+ "logps/chosen": -2536.8515625,
783
+ "logps/rejected": -3102.68896484375,
784
+ "loss": 0.0147,
785
+ "rewards/accuracies": 0.984375,
786
+ "rewards/chosen": 7.306772232055664,
787
+ "rewards/margins": 98.24702453613281,
788
+ "rewards/rejected": -90.94024658203125,
789
+ "step": 52
790
+ },
791
+ {
792
+ "epoch": 1.1797752808988764,
793
+ "grad_norm": 1.355103850364685,
794
+ "learning_rate": 2.26149870050826e-06,
795
+ "logits/chosen": 1.363991618156433,
796
+ "logits/rejected": 1.1863415241241455,
797
+ "logps/chosen": -3056.833740234375,
798
+ "logps/rejected": -3680.160888671875,
799
+ "loss": 0.0007,
800
+ "rewards/accuracies": 1.0,
801
+ "rewards/chosen": 9.664068222045898,
802
+ "rewards/margins": 112.41234588623047,
803
+ "rewards/rejected": -102.74827575683594,
804
+ "step": 53
805
+ },
806
+ {
807
+ "epoch": 1.202247191011236,
808
+ "grad_norm": 2.3306772708892822,
809
+ "learning_rate": 2.2268263054989753e-06,
810
+ "logits/chosen": 1.54270339012146,
811
+ "logits/rejected": 1.475841760635376,
812
+ "logps/chosen": -2780.744384765625,
813
+ "logps/rejected": -3487.5322265625,
814
+ "loss": 0.001,
815
+ "rewards/accuracies": 1.0,
816
+ "rewards/chosen": 11.756105422973633,
817
+ "rewards/margins": 107.98931884765625,
818
+ "rewards/rejected": -96.23321533203125,
819
+ "step": 54
820
+ },
821
+ {
822
+ "epoch": 1.2247191011235956,
823
+ "grad_norm": 1.47923743724823,
824
+ "learning_rate": 2.191638752173989e-06,
825
+ "logits/chosen": 1.6175808906555176,
826
+ "logits/rejected": 1.5379141569137573,
827
+ "logps/chosen": -2748.61328125,
828
+ "logps/rejected": -3274.468017578125,
829
+ "loss": 0.0117,
830
+ "rewards/accuracies": 0.984375,
831
+ "rewards/chosen": 8.739614486694336,
832
+ "rewards/margins": 110.58942413330078,
833
+ "rewards/rejected": -101.84980010986328,
834
+ "step": 55
835
+ },
836
+ {
837
+ "epoch": 1.247191011235955,
838
+ "grad_norm": 3.0752482414245605,
839
+ "learning_rate": 2.1559609806882834e-06,
840
+ "logits/chosen": 1.4324688911437988,
841
+ "logits/rejected": 1.2107815742492676,
842
+ "logps/chosen": -2790.97509765625,
843
+ "logps/rejected": -3406.87744140625,
844
+ "loss": 0.0003,
845
+ "rewards/accuracies": 1.0,
846
+ "rewards/chosen": 5.457365989685059,
847
+ "rewards/margins": 89.03166198730469,
848
+ "rewards/rejected": -83.57430267333984,
849
+ "step": 56
850
+ },
851
+ {
852
+ "epoch": 1.2696629213483146,
853
+ "grad_norm": 0.07106953859329224,
854
+ "learning_rate": 2.1198182786525674e-06,
855
+ "logits/chosen": 1.409006118774414,
856
+ "logits/rejected": 1.2638301849365234,
857
+ "logps/chosen": -2571.373046875,
858
+ "logps/rejected": -3436.89892578125,
859
+ "loss": 0.0112,
860
+ "rewards/accuracies": 0.984375,
861
+ "rewards/chosen": 12.910816192626953,
862
+ "rewards/margins": 133.70639038085938,
863
+ "rewards/rejected": -120.79557800292969,
864
+ "step": 57
865
+ },
866
+ {
867
+ "epoch": 1.2921348314606742,
868
+ "grad_norm": 1.3202946186065674,
869
+ "learning_rate": 2.0832362632099813e-06,
870
+ "logits/chosen": 1.4980010986328125,
871
+ "logits/rejected": 1.1623045206069946,
872
+ "logps/chosen": -3144.611083984375,
873
+ "logps/rejected": -3731.18212890625,
874
+ "loss": 0.0051,
875
+ "rewards/accuracies": 1.0,
876
+ "rewards/chosen": 9.096885681152344,
877
+ "rewards/margins": 142.87937927246094,
878
+ "rewards/rejected": -133.78250122070312,
879
+ "step": 58
880
+ },
881
+ {
882
+ "epoch": 1.3146067415730336,
883
+ "grad_norm": 2.9557082653045654,
884
+ "learning_rate": 2.0462408628792335e-06,
885
+ "logits/chosen": 1.6109601259231567,
886
+ "logits/rejected": 1.4365208148956299,
887
+ "logps/chosen": -2812.40625,
888
+ "logps/rejected": -3437.3193359375,
889
+ "loss": 0.0001,
890
+ "rewards/accuracies": 1.0,
891
+ "rewards/chosen": 8.375179290771484,
892
+ "rewards/margins": 111.16755676269531,
893
+ "rewards/rejected": -102.79237365722656,
894
+ "step": 59
895
+ },
896
+ {
897
+ "epoch": 1.3370786516853932,
898
+ "grad_norm": 0.2892356514930725,
899
+ "learning_rate": 2.008858299177045e-06,
900
+ "logits/chosen": 1.4753564596176147,
901
+ "logits/rejected": 1.2640880346298218,
902
+ "logps/chosen": -2899.793212890625,
903
+ "logps/rejected": -3406.771240234375,
904
+ "loss": 0.0157,
905
+ "rewards/accuracies": 0.984375,
906
+ "rewards/chosen": 7.380945682525635,
907
+ "rewards/margins": 106.26220703125,
908
+ "rewards/rejected": -98.88125610351562,
909
+ "step": 60
910
+ },
911
+ {
912
+ "epoch": 1.3595505617977528,
913
+ "grad_norm": 50.00154495239258,
914
+ "learning_rate": 1.9711150680329234e-06,
915
+ "logits/chosen": 1.6642662286758423,
916
+ "logits/rejected": 1.473952054977417,
917
+ "logps/chosen": -2834.24072265625,
918
+ "logps/rejected": -3363.942138671875,
919
+ "loss": 0.0175,
920
+ "rewards/accuracies": 0.984375,
921
+ "rewards/chosen": 8.414569854736328,
922
+ "rewards/margins": 110.77262115478516,
923
+ "rewards/rejected": -102.35804748535156,
924
+ "step": 61
925
+ },
926
+ {
927
+ "epoch": 1.3820224719101124,
928
+ "grad_norm": 0.07520447671413422,
929
+ "learning_rate": 1.9330379210094315e-06,
930
+ "logits/chosen": 1.5798277854919434,
931
+ "logits/rejected": 1.4446996450424194,
932
+ "logps/chosen": -2692.41162109375,
933
+ "logps/rejected": -3175.50830078125,
934
+ "loss": 0.0118,
935
+ "rewards/accuracies": 0.984375,
936
+ "rewards/chosen": 5.677203178405762,
937
+ "rewards/margins": 96.32395935058594,
938
+ "rewards/rejected": -90.64675903320312,
939
+ "step": 62
940
+ },
941
+ {
942
+ "epoch": 1.404494382022472,
943
+ "grad_norm": 3.16860032081604,
944
+ "learning_rate": 1.8946538463412818e-06,
945
+ "logits/chosen": 1.606536865234375,
946
+ "logits/rejected": 1.5855745077133179,
947
+ "logps/chosen": -2659.635986328125,
948
+ "logps/rejected": -3431.36572265625,
949
+ "loss": 0.0,
950
+ "rewards/accuracies": 1.0,
951
+ "rewards/chosen": 10.329705238342285,
952
+ "rewards/margins": 98.20384216308594,
953
+ "rewards/rejected": -87.87415313720703,
954
+ "step": 63
955
+ },
956
+ {
957
+ "epoch": 1.4269662921348314,
958
+ "grad_norm": 0.042245469987392426,
959
+ "learning_rate": 1.8559900498066726e-06,
960
+ "logits/chosen": 1.605839490890503,
961
+ "logits/rejected": 1.3888914585113525,
962
+ "logps/chosen": -2774.67529296875,
963
+ "logps/rejected": -3620.492431640625,
964
+ "loss": 0.0092,
965
+ "rewards/accuracies": 1.0,
966
+ "rewards/chosen": 14.000102996826172,
967
+ "rewards/margins": 140.67535400390625,
968
+ "rewards/rejected": -126.67523956298828,
969
+ "step": 64
970
+ },
971
+ {
972
+ "epoch": 1.449438202247191,
973
+ "grad_norm": 28.373090744018555,
974
+ "learning_rate": 1.8170739354444366e-06,
975
+ "logits/chosen": 1.5468522310256958,
976
+ "logits/rejected": 1.316043734550476,
977
+ "logps/chosen": -2898.541015625,
978
+ "logps/rejected": -3607.741943359375,
979
+ "loss": 0.0009,
980
+ "rewards/accuracies": 1.0,
981
+ "rewards/chosen": 9.336808204650879,
982
+ "rewards/margins": 125.04135131835938,
983
+ "rewards/rejected": -115.70454406738281,
984
+ "step": 65
985
+ },
986
+ {
987
+ "epoch": 1.4719101123595506,
988
+ "grad_norm": 3.688307046890259,
989
+ "learning_rate": 1.7779330861306717e-06,
990
+ "logits/chosen": 1.4648973941802979,
991
+ "logits/rejected": 1.3168296813964844,
992
+ "logps/chosen": -3060.658935546875,
993
+ "logps/rejected": -4020.65185546875,
994
+ "loss": 0.0001,
995
+ "rewards/accuracies": 1.0,
996
+ "rewards/chosen": 3.3615617752075195,
997
+ "rewards/margins": 130.01849365234375,
998
+ "rewards/rejected": -126.65692138671875,
999
+ "step": 66
1000
+ },
1001
+ {
1002
+ "epoch": 1.49438202247191,
1003
+ "grad_norm": 21.308137893676758,
1004
+ "learning_rate": 1.738595244028608e-06,
1005
+ "logits/chosen": 1.4748642444610596,
1006
+ "logits/rejected": 1.3131040334701538,
1007
+ "logps/chosen": -2794.14599609375,
1008
+ "logps/rejected": -3351.5478515625,
1009
+ "loss": 0.0081,
1010
+ "rewards/accuracies": 1.0,
1011
+ "rewards/chosen": 2.8835487365722656,
1012
+ "rewards/margins": 98.07205963134766,
1013
+ "rewards/rejected": -95.18850708007812,
1014
+ "step": 67
1015
+ },
1016
+ {
1017
+ "epoch": 1.5168539325842696,
1018
+ "grad_norm": 1.3383527994155884,
1019
+ "learning_rate": 1.699088290925583e-06,
1020
+ "logits/chosen": 1.372517704963684,
1021
+ "logits/rejected": 1.302228569984436,
1022
+ "logps/chosen": -2794.654052734375,
1023
+ "logps/rejected": -3820.33837890625,
1024
+ "loss": 0.0112,
1025
+ "rewards/accuracies": 0.984375,
1026
+ "rewards/chosen": 9.68542766571045,
1027
+ "rewards/margins": 141.4244842529297,
1028
+ "rewards/rejected": -131.73907470703125,
1029
+ "step": 68
1030
+ },
1031
+ {
1032
+ "epoch": 1.5393258426966292,
1033
+ "grad_norm": 1.4769072532653809,
1034
+ "learning_rate": 1.6594402284710481e-06,
1035
+ "logits/chosen": 1.5602664947509766,
1036
+ "logits/rejected": 1.4328043460845947,
1037
+ "logps/chosen": -2850.06640625,
1038
+ "logps/rejected": -3549.932861328125,
1039
+ "loss": 0.026,
1040
+ "rewards/accuracies": 0.984375,
1041
+ "rewards/chosen": 5.793665409088135,
1042
+ "rewards/margins": 124.38016510009766,
1043
+ "rewards/rejected": -118.58650970458984,
1044
+ "step": 69
1045
+ },
1046
+ {
1047
+ "epoch": 1.5617977528089888,
1048
+ "grad_norm": 5.262300968170166,
1049
+ "learning_rate": 1.6196791583296247e-06,
1050
+ "logits/chosen": 1.4012134075164795,
1051
+ "logits/rejected": 1.2154825925827026,
1052
+ "logps/chosen": -2862.569580078125,
1053
+ "logps/rejected": -3687.36328125,
1054
+ "loss": 0.003,
1055
+ "rewards/accuracies": 1.0,
1056
+ "rewards/chosen": 12.932228088378906,
1057
+ "rewards/margins": 135.03558349609375,
1058
+ "rewards/rejected": -122.10337829589844,
1059
+ "step": 70
1060
+ },
1061
+ {
1062
+ "epoch": 1.5842696629213484,
1063
+ "grad_norm": 2.9438984394073486,
1064
+ "learning_rate": 1.579833262263268e-06,
1065
+ "logits/chosen": 1.4590383768081665,
1066
+ "logits/rejected": 1.1356399059295654,
1067
+ "logps/chosen": -2651.068603515625,
1068
+ "logps/rejected": -3142.91455078125,
1069
+ "loss": 0.0118,
1070
+ "rewards/accuracies": 0.984375,
1071
+ "rewards/chosen": 9.391037940979004,
1072
+ "rewards/margins": 119.59295654296875,
1073
+ "rewards/rejected": -110.2019271850586,
1074
+ "step": 71
1075
+ },
1076
+ {
1077
+ "epoch": 1.606741573033708,
1078
+ "grad_norm": 0.6242117881774902,
1079
+ "learning_rate": 1.5399307821566623e-06,
1080
+ "logits/chosen": 1.5220391750335693,
1081
+ "logits/rejected": 1.2139172554016113,
1082
+ "logps/chosen": -2834.0634765625,
1083
+ "logps/rejected": -3674.3623046875,
1084
+ "loss": 0.0218,
1085
+ "rewards/accuracies": 0.984375,
1086
+ "rewards/chosen": 14.53393268585205,
1087
+ "rewards/margins": 154.6046142578125,
1088
+ "rewards/rejected": -140.0706787109375,
1089
+ "step": 72
1090
+ },
1091
+ {
1092
+ "epoch": 1.6292134831460674,
1093
+ "grad_norm": 0.17758429050445557,
1094
+ "learning_rate": 1.5e-06,
1095
+ "logits/chosen": 1.531368374824524,
1096
+ "logits/rejected": 1.3681552410125732,
1097
+ "logps/chosen": -2943.841064453125,
1098
+ "logps/rejected": -3831.00927734375,
1099
+ "loss": 0.0117,
1100
+ "rewards/accuracies": 0.984375,
1101
+ "rewards/chosen": 11.650660514831543,
1102
+ "rewards/margins": 151.18350219726562,
1103
+ "rewards/rejected": -139.5328369140625,
1104
+ "step": 73
1105
+ },
1106
+ {
1107
+ "epoch": 1.651685393258427,
1108
+ "grad_norm": 12.694519996643066,
1109
+ "learning_rate": 1.460069217843338e-06,
1110
+ "logits/chosen": 1.416333794593811,
1111
+ "logits/rejected": 1.1884994506835938,
1112
+ "logps/chosen": -3090.49658203125,
1113
+ "logps/rejected": -3794.48095703125,
1114
+ "loss": 0.004,
1115
+ "rewards/accuracies": 1.0,
1116
+ "rewards/chosen": 12.209739685058594,
1117
+ "rewards/margins": 145.9217529296875,
1118
+ "rewards/rejected": -133.71200561523438,
1119
+ "step": 74
1120
+ },
1121
+ {
1122
+ "epoch": 1.6741573033707864,
1123
+ "grad_norm": 5.181153774261475,
1124
+ "learning_rate": 1.4201667377367324e-06,
1125
+ "logits/chosen": 1.5291459560394287,
1126
+ "logits/rejected": 1.390205979347229,
1127
+ "logps/chosen": -2819.557861328125,
1128
+ "logps/rejected": -3400.41748046875,
1129
+ "loss": 0.0112,
1130
+ "rewards/accuracies": 0.984375,
1131
+ "rewards/chosen": 6.913262367248535,
1132
+ "rewards/margins": 108.99024200439453,
1133
+ "rewards/rejected": -102.07699584960938,
1134
+ "step": 75
1135
+ },
1136
+ {
1137
+ "epoch": 1.696629213483146,
1138
+ "grad_norm": 5.866981506347656,
1139
+ "learning_rate": 1.3803208416703752e-06,
1140
+ "logits/chosen": 1.509679913520813,
1141
+ "logits/rejected": 1.3863307237625122,
1142
+ "logps/chosen": -2517.104736328125,
1143
+ "logps/rejected": -3187.1181640625,
1144
+ "loss": 0.0026,
1145
+ "rewards/accuracies": 1.0,
1146
+ "rewards/chosen": 6.015058517456055,
1147
+ "rewards/margins": 110.0936508178711,
1148
+ "rewards/rejected": -104.07859802246094,
1149
+ "step": 76
1150
+ },
1151
+ {
1152
+ "epoch": 1.7191011235955056,
1153
+ "grad_norm": 3.792738199234009,
1154
+ "learning_rate": 1.3405597715289522e-06,
1155
+ "logits/chosen": 1.4075974225997925,
1156
+ "logits/rejected": 1.297675609588623,
1157
+ "logps/chosen": -3116.082275390625,
1158
+ "logps/rejected": -3820.78271484375,
1159
+ "loss": 0.0017,
1160
+ "rewards/accuracies": 1.0,
1161
+ "rewards/chosen": 6.922908782958984,
1162
+ "rewards/margins": 124.51133728027344,
1163
+ "rewards/rejected": -117.58842468261719,
1164
+ "step": 77
1165
+ },
1166
+ {
1167
+ "epoch": 1.7415730337078652,
1168
+ "grad_norm": 8.345385551452637,
1169
+ "learning_rate": 1.3009117090744173e-06,
1170
+ "logits/chosen": 1.5826494693756104,
1171
+ "logits/rejected": 1.2875326871871948,
1172
+ "logps/chosen": -2909.03515625,
1173
+ "logps/rejected": -3438.2587890625,
1174
+ "loss": 0.0111,
1175
+ "rewards/accuracies": 1.0,
1176
+ "rewards/chosen": 8.310379981994629,
1177
+ "rewards/margins": 140.91641235351562,
1178
+ "rewards/rejected": -132.6060333251953,
1179
+ "step": 78
1180
+ },
1181
+ {
1182
+ "epoch": 1.7640449438202248,
1183
+ "grad_norm": 0.4116104245185852,
1184
+ "learning_rate": 1.2614047559713923e-06,
1185
+ "logits/chosen": 1.4220818281173706,
1186
+ "logits/rejected": 1.2691839933395386,
1187
+ "logps/chosen": -3212.60693359375,
1188
+ "logps/rejected": -3793.721435546875,
1189
+ "loss": 0.0003,
1190
+ "rewards/accuracies": 1.0,
1191
+ "rewards/chosen": 2.4821667671203613,
1192
+ "rewards/margins": 128.71267700195312,
1193
+ "rewards/rejected": -126.23049926757812,
1194
+ "step": 79
1195
+ },
1196
+ {
1197
+ "epoch": 1.7865168539325844,
1198
+ "grad_norm": 0.8209803700447083,
1199
+ "learning_rate": 1.2220669138693288e-06,
1200
+ "logits/chosen": 1.3909624814987183,
1201
+ "logits/rejected": 1.1474812030792236,
1202
+ "logps/chosen": -2994.385009765625,
1203
+ "logps/rejected": -3750.771728515625,
1204
+ "loss": 0.0112,
1205
+ "rewards/accuracies": 0.984375,
1206
+ "rewards/chosen": 9.527303695678711,
1207
+ "rewards/margins": 137.7163543701172,
1208
+ "rewards/rejected": -128.18905639648438,
1209
+ "step": 80
1210
+ },
1211
+ {
1212
+ "epoch": 1.8089887640449438,
1213
+ "grad_norm": 1.4425156116485596,
1214
+ "learning_rate": 1.1829260645555634e-06,
1215
+ "logits/chosen": 1.3281006813049316,
1216
+ "logits/rejected": 1.039908766746521,
1217
+ "logps/chosen": -3059.208251953125,
1218
+ "logps/rejected": -3867.33349609375,
1219
+ "loss": 0.0108,
1220
+ "rewards/accuracies": 1.0,
1221
+ "rewards/chosen": 12.086620330810547,
1222
+ "rewards/margins": 160.84959411621094,
1223
+ "rewards/rejected": -148.76295471191406,
1224
+ "step": 81
1225
+ },
1226
+ {
1227
+ "epoch": 1.8314606741573034,
1228
+ "grad_norm": 0.7217972278594971,
1229
+ "learning_rate": 1.1440099501933277e-06,
1230
+ "logits/chosen": 1.3363004922866821,
1231
+ "logits/rejected": 1.2744730710983276,
1232
+ "logps/chosen": -3156.716796875,
1233
+ "logps/rejected": -4011.334716796875,
1234
+ "loss": 0.001,
1235
+ "rewards/accuracies": 1.0,
1236
+ "rewards/chosen": 4.8549346923828125,
1237
+ "rewards/margins": 134.17984008789062,
1238
+ "rewards/rejected": -129.3249053955078,
1239
+ "step": 82
1240
+ },
1241
+ {
1242
+ "epoch": 1.8539325842696628,
1243
+ "grad_norm": 1.5164899826049805,
1244
+ "learning_rate": 1.1053461536587183e-06,
1245
+ "logits/chosen": 1.4580892324447632,
1246
+ "logits/rejected": 1.2366647720336914,
1247
+ "logps/chosen": -2984.4619140625,
1248
+ "logps/rejected": -3910.234375,
1249
+ "loss": 0.0001,
1250
+ "rewards/accuracies": 1.0,
1251
+ "rewards/chosen": 9.195051193237305,
1252
+ "rewards/margins": 148.3942413330078,
1253
+ "rewards/rejected": -139.1991729736328,
1254
+ "step": 83
1255
+ },
1256
+ {
1257
+ "epoch": 1.8764044943820224,
1258
+ "grad_norm": 3.071080446243286,
1259
+ "learning_rate": 1.0669620789905688e-06,
1260
+ "logits/chosen": 1.5336228609085083,
1261
+ "logits/rejected": 1.3450926542282104,
1262
+ "logps/chosen": -2671.64892578125,
1263
+ "logps/rejected": -3312.888427734375,
1264
+ "loss": 0.0007,
1265
+ "rewards/accuracies": 1.0,
1266
+ "rewards/chosen": 5.30421257019043,
1267
+ "rewards/margins": 96.9708023071289,
1268
+ "rewards/rejected": -91.66659545898438,
1269
+ "step": 84
1270
+ },
1271
+ {
1272
+ "epoch": 1.898876404494382,
1273
+ "grad_norm": 0.2966591715812683,
1274
+ "learning_rate": 1.0288849319670773e-06,
1275
+ "logits/chosen": 1.5615055561065674,
1276
+ "logits/rejected": 1.4262051582336426,
1277
+ "logps/chosen": -2924.010498046875,
1278
+ "logps/rejected": -3439.7509765625,
1279
+ "loss": 0.0001,
1280
+ "rewards/accuracies": 1.0,
1281
+ "rewards/chosen": 4.811070919036865,
1282
+ "rewards/margins": 107.32271575927734,
1283
+ "rewards/rejected": -102.51164245605469,
1284
+ "step": 85
1285
+ },
1286
+ {
1287
+ "epoch": 1.9213483146067416,
1288
+ "grad_norm": 0.05935266241431236,
1289
+ "learning_rate": 9.911417008229545e-07,
1290
+ "logits/chosen": 1.4063825607299805,
1291
+ "logits/rejected": 1.1860499382019043,
1292
+ "logps/chosen": -2746.5126953125,
1293
+ "logps/rejected": -3493.92578125,
1294
+ "loss": 0.0325,
1295
+ "rewards/accuracies": 0.953125,
1296
+ "rewards/chosen": 11.67589282989502,
1297
+ "rewards/margins": 137.2821502685547,
1298
+ "rewards/rejected": -125.60626220703125,
1299
+ "step": 86
1300
+ },
1301
+ {
1302
+ "epoch": 1.9438202247191012,
1303
+ "grad_norm": 0.21089386940002441,
1304
+ "learning_rate": 9.537591371207668e-07,
1305
+ "logits/chosen": 1.5266857147216797,
1306
+ "logits/rejected": 1.4005635976791382,
1307
+ "logps/chosen": -2387.665771484375,
1308
+ "logps/rejected": -3293.546630859375,
1309
+ "loss": 0.0001,
1310
+ "rewards/accuracies": 1.0,
1311
+ "rewards/chosen": 5.131157875061035,
1312
+ "rewards/margins": 137.9029083251953,
1313
+ "rewards/rejected": -132.77175903320312,
1314
+ "step": 87
1315
+ },
1316
+ {
1317
+ "epoch": 1.9662921348314608,
1318
+ "grad_norm": 0.4727032780647278,
1319
+ "learning_rate": 9.167637367900192e-07,
1320
+ "logits/chosen": 1.5321190357208252,
1321
+ "logits/rejected": 1.3832690715789795,
1322
+ "logps/chosen": -2469.994384765625,
1323
+ "logps/rejected": -3097.712890625,
1324
+ "loss": 0.0117,
1325
+ "rewards/accuracies": 0.984375,
1326
+ "rewards/chosen": 13.177355766296387,
1327
+ "rewards/margins": 116.04686737060547,
1328
+ "rewards/rejected": -102.8695068359375,
1329
+ "step": 88
1330
+ },
1331
+ {
1332
+ "epoch": 1.9887640449438202,
1333
+ "grad_norm": 0.39027953147888184,
1334
+ "learning_rate": 8.801817213474331e-07,
1335
+ "logits/chosen": 1.5794587135314941,
1336
+ "logits/rejected": 1.3486638069152832,
1337
+ "logps/chosen": -2815.1982421875,
1338
+ "logps/rejected": -3435.67919921875,
1339
+ "loss": 0.0002,
1340
+ "rewards/accuracies": 1.0,
1341
+ "rewards/chosen": 8.544872283935547,
1342
+ "rewards/margins": 112.28601837158203,
1343
+ "rewards/rejected": -103.74114227294922,
1344
+ "step": 89
1345
+ },
1346
+ {
1347
+ "epoch": 2.0,
1348
+ "grad_norm": 0.14720159769058228,
1349
+ "learning_rate": 8.44039019311717e-07,
1350
+ "logits/chosen": 1.492700457572937,
1351
+ "logits/rejected": 1.3120732307434082,
1352
+ "logps/chosen": -3285.24267578125,
1353
+ "logps/rejected": -3985.763916015625,
1354
+ "loss": 0.0,
1355
+ "rewards/accuracies": 1.0,
1356
+ "rewards/chosen": 11.00776481628418,
1357
+ "rewards/margins": 157.06927490234375,
1358
+ "rewards/rejected": -146.06150817871094,
1359
+ "step": 90
1360
+ },
1361
+ {
1362
+ "epoch": 2.0224719101123596,
1363
+ "grad_norm": 0.019609661772847176,
1364
+ "learning_rate": 8.08361247826011e-07,
1365
+ "logits/chosen": 1.3633915185928345,
1366
+ "logits/rejected": 1.1915699243545532,
1367
+ "logps/chosen": -3307.618408203125,
1368
+ "logps/rejected": -4103.1875,
1369
+ "loss": 0.0,
1370
+ "rewards/accuracies": 1.0,
1371
+ "rewards/chosen": -1.502930760383606,
1372
+ "rewards/margins": 150.0188446044922,
1373
+ "rewards/rejected": -151.52178955078125,
1374
+ "step": 91
1375
+ },
1376
+ {
1377
+ "epoch": 2.044943820224719,
1378
+ "grad_norm": 0.026041870936751366,
1379
+ "learning_rate": 7.731736945010249e-07,
1380
+ "logits/chosen": 1.4235529899597168,
1381
+ "logits/rejected": 1.0836195945739746,
1382
+ "logps/chosen": -3224.001708984375,
1383
+ "logps/rejected": -3803.459228515625,
1384
+ "loss": 0.0,
1385
+ "rewards/accuracies": 1.0,
1386
+ "rewards/chosen": 9.049484252929688,
1387
+ "rewards/margins": 149.46070861816406,
1388
+ "rewards/rejected": -140.41123962402344,
1389
+ "step": 92
1390
+ },
1391
+ {
1392
+ "epoch": 2.067415730337079,
1393
+ "grad_norm": 0.36662229895591736,
1394
+ "learning_rate": 7.385012994917405e-07,
1395
+ "logits/chosen": 1.461303949356079,
1396
+ "logits/rejected": 1.401003360748291,
1397
+ "logps/chosen": -2710.856689453125,
1398
+ "logps/rejected": -3409.259765625,
1399
+ "loss": 0.0109,
1400
+ "rewards/accuracies": 0.984375,
1401
+ "rewards/chosen": 5.063204765319824,
1402
+ "rewards/margins": 96.820068359375,
1403
+ "rewards/rejected": -91.75686645507812,
1404
+ "step": 93
1405
+ },
1406
+ {
1407
+ "epoch": 2.0898876404494384,
1408
+ "grad_norm": 0.22327114641666412,
1409
+ "learning_rate": 7.043686378203864e-07,
1410
+ "logits/chosen": 1.5914536714553833,
1411
+ "logits/rejected": 1.3907164335250854,
1412
+ "logps/chosen": -2657.873291015625,
1413
+ "logps/rejected": -3420.0283203125,
1414
+ "loss": 0.0109,
1415
+ "rewards/accuracies": 0.984375,
1416
+ "rewards/chosen": 12.433341979980469,
1417
+ "rewards/margins": 118.74362182617188,
1418
+ "rewards/rejected": -106.31027221679688,
1419
+ "step": 94
1420
+ },
1421
+ {
1422
+ "epoch": 2.1123595505617976,
1423
+ "grad_norm": 0.006661942228674889,
1424
+ "learning_rate": 6.707999019582104e-07,
1425
+ "logits/chosen": 1.4297124147415161,
1426
+ "logits/rejected": 1.2694649696350098,
1427
+ "logps/chosen": -2567.587890625,
1428
+ "logps/rejected": -3557.106201171875,
1429
+ "loss": 0.0,
1430
+ "rewards/accuracies": 1.0,
1431
+ "rewards/chosen": 7.91953182220459,
1432
+ "rewards/margins": 146.32005310058594,
1433
+ "rewards/rejected": -138.4005126953125,
1434
+ "step": 95
1435
+ },
1436
+ {
1437
+ "epoch": 2.134831460674157,
1438
+ "grad_norm": 0.010272935964167118,
1439
+ "learning_rate": 6.378188846783898e-07,
1440
+ "logits/chosen": 1.584874153137207,
1441
+ "logits/rejected": 1.3883558511734009,
1442
+ "logps/chosen": -2836.077880859375,
1443
+ "logps/rejected": -3408.93115234375,
1444
+ "loss": 0.0,
1445
+ "rewards/accuracies": 1.0,
1446
+ "rewards/chosen": 6.626905918121338,
1447
+ "rewards/margins": 121.95980834960938,
1448
+ "rewards/rejected": -115.33291625976562,
1449
+ "step": 96
1450
+ },
1451
+ {
1452
+ "epoch": 2.157303370786517,
1453
+ "grad_norm": 0.006059441715478897,
1454
+ "learning_rate": 6.054489621922477e-07,
1455
+ "logits/chosen": 1.6233469247817993,
1456
+ "logits/rejected": 1.4364811182022095,
1457
+ "logps/chosen": -2997.014404296875,
1458
+ "logps/rejected": -3488.54150390625,
1459
+ "loss": 0.0108,
1460
+ "rewards/accuracies": 0.984375,
1461
+ "rewards/chosen": 12.179953575134277,
1462
+ "rewards/margins": 123.74882507324219,
1463
+ "rewards/rejected": -111.56886291503906,
1464
+ "step": 97
1465
+ },
1466
+ {
1467
+ "epoch": 2.1797752808988764,
1468
+ "grad_norm": 0.23592473566532135,
1469
+ "learning_rate": 5.737130775807122e-07,
1470
+ "logits/chosen": 1.4150291681289673,
1471
+ "logits/rejected": 1.3036937713623047,
1472
+ "logps/chosen": -2623.100830078125,
1473
+ "logps/rejected": -3417.743408203125,
1474
+ "loss": 0.011,
1475
+ "rewards/accuracies": 0.984375,
1476
+ "rewards/chosen": 9.777491569519043,
1477
+ "rewards/margins": 126.9278335571289,
1478
+ "rewards/rejected": -117.15032196044922,
1479
+ "step": 98
1480
+ },
1481
+ {
1482
+ "epoch": 2.202247191011236,
1483
+ "grad_norm": 0.0040085772052407265,
1484
+ "learning_rate": 5.426337245327703e-07,
1485
+ "logits/chosen": 1.3026162385940552,
1486
+ "logits/rejected": 1.194283127784729,
1487
+ "logps/chosen": -2882.58154296875,
1488
+ "logps/rejected": -3794.05078125,
1489
+ "loss": 0.0,
1490
+ "rewards/accuracies": 1.0,
1491
+ "rewards/chosen": 10.322346687316895,
1492
+ "rewards/margins": 140.7698211669922,
1493
+ "rewards/rejected": -130.44747924804688,
1494
+ "step": 99
1495
+ },
1496
+ {
1497
+ "epoch": 2.2247191011235956,
1498
+ "grad_norm": 0.005036317277699709,
1499
+ "learning_rate": 5.122329314024422e-07,
1500
+ "logits/chosen": 1.4347069263458252,
1501
+ "logits/rejected": 1.2561771869659424,
1502
+ "logps/chosen": -2425.357177734375,
1503
+ "logps/rejected": -3138.833740234375,
1504
+ "loss": 0.0,
1505
+ "rewards/accuracies": 1.0,
1506
+ "rewards/chosen": 13.752297401428223,
1507
+ "rewards/margins": 120.6755599975586,
1508
+ "rewards/rejected": -106.92326354980469,
1509
+ "step": 100
1510
+ },
1511
+ {
1512
+ "epoch": 2.247191011235955,
1513
+ "grad_norm": 0.267286479473114,
1514
+ "learning_rate": 4.825322455955759e-07,
1515
+ "logits/chosen": 1.376643419265747,
1516
+ "logits/rejected": 1.2739124298095703,
1517
+ "logps/chosen": -2709.716796875,
1518
+ "logps/rejected": -3520.384765625,
1519
+ "loss": 0.0108,
1520
+ "rewards/accuracies": 0.984375,
1521
+ "rewards/chosen": 10.822145462036133,
1522
+ "rewards/margins": 141.28472900390625,
1523
+ "rewards/rejected": -130.4625701904297,
1524
+ "step": 101
1525
+ },
1526
+ {
1527
+ "epoch": 2.2696629213483144,
1528
+ "grad_norm": 0.37806662917137146,
1529
+ "learning_rate": 4.5355271829752307e-07,
1530
+ "logits/chosen": 1.4881722927093506,
1531
+ "logits/rejected": 1.346581220626831,
1532
+ "logps/chosen": -2821.6923828125,
1533
+ "logps/rejected": -3442.4619140625,
1534
+ "loss": 0.0108,
1535
+ "rewards/accuracies": 0.984375,
1536
+ "rewards/chosen": 9.021244049072266,
1537
+ "rewards/margins": 126.26439666748047,
1538
+ "rewards/rejected": -117.2431640625,
1539
+ "step": 102
1540
+ },
1541
+ {
1542
+ "epoch": 2.292134831460674,
1543
+ "grad_norm": 0.0023486721329391003,
1544
+ "learning_rate": 4.2531488955252726e-07,
1545
+ "logits/chosen": 1.4559850692749023,
1546
+ "logits/rejected": 1.1960179805755615,
1547
+ "logps/chosen": -2982.266357421875,
1548
+ "logps/rejected": -3776.720458984375,
1549
+ "loss": 0.0108,
1550
+ "rewards/accuracies": 0.984375,
1551
+ "rewards/chosen": 13.267072677612305,
1552
+ "rewards/margins": 156.5282440185547,
1553
+ "rewards/rejected": -143.26113891601562,
1554
+ "step": 103
1555
+ },
1556
+ {
1557
+ "epoch": 2.3146067415730336,
1558
+ "grad_norm": 0.006942716892808676,
1559
+ "learning_rate": 3.978387737053994e-07,
1560
+ "logits/chosen": 1.5748894214630127,
1561
+ "logits/rejected": 1.4408270120620728,
1562
+ "logps/chosen": -2752.75634765625,
1563
+ "logps/rejected": -3425.216064453125,
1564
+ "loss": 0.0,
1565
+ "rewards/accuracies": 1.0,
1566
+ "rewards/chosen": 15.169326782226562,
1567
+ "rewards/margins": 107.41685485839844,
1568
+ "rewards/rejected": -92.24752807617188,
1569
+ "step": 104
1570
+ },
1571
+ {
1572
+ "epoch": 2.337078651685393,
1573
+ "grad_norm": 0.1621246337890625,
1574
+ "learning_rate": 3.7114384521579234e-07,
1575
+ "logits/chosen": 1.6052483320236206,
1576
+ "logits/rejected": 1.446576714515686,
1577
+ "logps/chosen": -2733.099609375,
1578
+ "logps/rejected": -3558.54931640625,
1579
+ "loss": 0.0108,
1580
+ "rewards/accuracies": 1.0,
1581
+ "rewards/chosen": 6.2836151123046875,
1582
+ "rewards/margins": 120.5184326171875,
1583
+ "rewards/rejected": -114.23482513427734,
1584
+ "step": 105
1585
+ },
1586
+ {
1587
+ "epoch": 2.359550561797753,
1588
+ "grad_norm": 0.0010318144923076034,
1589
+ "learning_rate": 3.4524902485514043e-07,
1590
+ "logits/chosen": 1.5261331796646118,
1591
+ "logits/rejected": 1.2617827653884888,
1592
+ "logps/chosen": -2832.090576171875,
1593
+ "logps/rejected": -3448.433837890625,
1594
+ "loss": 0.0,
1595
+ "rewards/accuracies": 1.0,
1596
+ "rewards/chosen": 8.34963607788086,
1597
+ "rewards/margins": 127.82434844970703,
1598
+ "rewards/rejected": -119.47471618652344,
1599
+ "step": 106
1600
+ },
1601
+ {
1602
+ "epoch": 2.3820224719101124,
1603
+ "grad_norm": 0.001886666170321405,
1604
+ "learning_rate": 3.201726662960363e-07,
1605
+ "logits/chosen": 1.4487926959991455,
1606
+ "logits/rejected": 1.2953495979309082,
1607
+ "logps/chosen": -2931.4873046875,
1608
+ "logps/rejected": -3765.528564453125,
1609
+ "loss": 0.0,
1610
+ "rewards/accuracies": 1.0,
1611
+ "rewards/chosen": 5.4385576248168945,
1612
+ "rewards/margins": 141.048583984375,
1613
+ "rewards/rejected": -135.6100311279297,
1614
+ "step": 107
1615
+ },
1616
+ {
1617
+ "epoch": 2.404494382022472,
1618
+ "grad_norm": 0.0003725312708411366,
1619
+ "learning_rate": 2.9593254310355485e-07,
1620
+ "logits/chosen": 1.5249533653259277,
1621
+ "logits/rejected": 1.36188805103302,
1622
+ "logps/chosen": -2958.6279296875,
1623
+ "logps/rejected": -3625.80859375,
1624
+ "loss": 0.0,
1625
+ "rewards/accuracies": 1.0,
1626
+ "rewards/chosen": 8.046311378479004,
1627
+ "rewards/margins": 136.48867797851562,
1628
+ "rewards/rejected": -128.44235229492188,
1629
+ "step": 108
1630
+ },
1631
+ {
1632
+ "epoch": 2.4269662921348316,
1633
+ "grad_norm": 0.0058527453802526,
1634
+ "learning_rate": 2.725458361377465e-07,
1635
+ "logits/chosen": 1.449507236480713,
1636
+ "logits/rejected": 1.195552110671997,
1637
+ "logps/chosen": -3101.913330078125,
1638
+ "logps/rejected": -3919.42626953125,
1639
+ "loss": 0.0108,
1640
+ "rewards/accuracies": 0.984375,
1641
+ "rewards/chosen": 9.668648719787598,
1642
+ "rewards/margins": 170.04879760742188,
1643
+ "rewards/rejected": -160.38015747070312,
1644
+ "step": 109
1645
+ },
1646
+ {
1647
+ "epoch": 2.449438202247191,
1648
+ "grad_norm": 0.004259227309376001,
1649
+ "learning_rate": 2.5002912137622743e-07,
1650
+ "logits/chosen": 1.3936243057250977,
1651
+ "logits/rejected": 1.1740200519561768,
1652
+ "logps/chosen": -2701.333740234375,
1653
+ "logps/rejected": -3472.6923828125,
1654
+ "loss": 0.0108,
1655
+ "rewards/accuracies": 0.984375,
1656
+ "rewards/chosen": 11.122644424438477,
1657
+ "rewards/margins": 145.8236083984375,
1658
+ "rewards/rejected": -134.70095825195312,
1659
+ "step": 110
1660
+ },
1661
+ {
1662
+ "epoch": 2.4719101123595504,
1663
+ "grad_norm": 0.010651292279362679,
1664
+ "learning_rate": 2.2839835816549365e-07,
1665
+ "logits/chosen": 1.711632490158081,
1666
+ "logits/rejected": 1.4845446348190308,
1667
+ "logps/chosen": -3014.84912109375,
1668
+ "logps/rejected": -3401.6298828125,
1669
+ "loss": 0.0,
1670
+ "rewards/accuracies": 1.0,
1671
+ "rewards/chosen": 8.185779571533203,
1672
+ "rewards/margins": 117.65122985839844,
1673
+ "rewards/rejected": -109.4654541015625,
1674
+ "step": 111
1675
+ },
1676
+ {
1677
+ "epoch": 2.49438202247191,
1678
+ "grad_norm": 0.21365472674369812,
1679
+ "learning_rate": 2.0766887790929072e-07,
1680
+ "logits/chosen": 1.5201102495193481,
1681
+ "logits/rejected": 1.3360121250152588,
1682
+ "logps/chosen": -2596.279296875,
1683
+ "logps/rejected": -3536.295166015625,
1684
+ "loss": 0.0108,
1685
+ "rewards/accuracies": 0.984375,
1686
+ "rewards/chosen": 9.575386047363281,
1687
+ "rewards/margins": 136.92886352539062,
1688
+ "rewards/rejected": -127.35346221923828,
1689
+ "step": 112
1690
+ },
1691
+ {
1692
+ "epoch": 2.5168539325842696,
1693
+ "grad_norm": 0.06359975039958954,
1694
+ "learning_rate": 1.8785537320205808e-07,
1695
+ "logits/chosen": 1.4054570198059082,
1696
+ "logits/rejected": 1.304233431816101,
1697
+ "logps/chosen": -2882.770263671875,
1698
+ "logps/rejected": -3637.910888671875,
1699
+ "loss": 0.0,
1700
+ "rewards/accuracies": 1.0,
1701
+ "rewards/chosen": 9.500956535339355,
1702
+ "rewards/margins": 114.78219604492188,
1703
+ "rewards/rejected": -105.28123474121094,
1704
+ "step": 113
1705
+ },
1706
+ {
1707
+ "epoch": 2.539325842696629,
1708
+ "grad_norm": 0.039696987718343735,
1709
+ "learning_rate": 1.6897188741514286e-07,
1710
+ "logits/chosen": 1.3486000299453735,
1711
+ "logits/rejected": 1.2321511507034302,
1712
+ "logps/chosen": -2972.344970703125,
1713
+ "logps/rejected": -3984.229248046875,
1714
+ "loss": 0.0,
1715
+ "rewards/accuracies": 1.0,
1716
+ "rewards/chosen": 5.131504535675049,
1717
+ "rewards/margins": 162.7792205810547,
1718
+ "rewards/rejected": -157.64772033691406,
1719
+ "step": 114
1720
+ },
1721
+ {
1722
+ "epoch": 2.561797752808989,
1723
+ "grad_norm": 0.002948309760540724,
1724
+ "learning_rate": 1.510318047431713e-07,
1725
+ "logits/chosen": 1.4727129936218262,
1726
+ "logits/rejected": 1.3785285949707031,
1727
+ "logps/chosen": -2675.683837890625,
1728
+ "logps/rejected": -3297.158447265625,
1729
+ "loss": 0.0,
1730
+ "rewards/accuracies": 1.0,
1731
+ "rewards/chosen": 7.861666679382324,
1732
+ "rewards/margins": 110.47186279296875,
1733
+ "rewards/rejected": -102.61019134521484,
1734
+ "step": 115
1735
+ },
1736
+ {
1737
+ "epoch": 2.5842696629213484,
1738
+ "grad_norm": 0.07731137424707413,
1739
+ "learning_rate": 1.3404784071763015e-07,
1740
+ "logits/chosen": 1.4941082000732422,
1741
+ "logits/rejected": 1.4053186178207397,
1742
+ "logps/chosen": -2728.80615234375,
1743
+ "logps/rejected": -3415.1708984375,
1744
+ "loss": 0.0001,
1745
+ "rewards/accuracies": 1.0,
1746
+ "rewards/chosen": 10.857705116271973,
1747
+ "rewards/margins": 109.21708679199219,
1748
+ "rewards/rejected": -98.35939025878906,
1749
+ "step": 116
1750
+ },
1751
+ {
1752
+ "epoch": 2.606741573033708,
1753
+ "grad_norm": 0.01123058795928955,
1754
+ "learning_rate": 1.1803203319438056e-07,
1755
+ "logits/chosen": 1.4337643384933472,
1756
+ "logits/rejected": 1.2645751237869263,
1757
+ "logps/chosen": -2684.67041015625,
1758
+ "logps/rejected": -3446.0908203125,
1759
+ "loss": 0.0,
1760
+ "rewards/accuracies": 1.0,
1761
+ "rewards/chosen": 13.534300804138184,
1762
+ "rewards/margins": 135.90628051757812,
1763
+ "rewards/rejected": -122.37198638916016,
1764
+ "step": 117
1765
+ },
1766
+ {
1767
+ "epoch": 2.629213483146067,
1768
+ "grad_norm": 0.7818881869316101,
1769
+ "learning_rate": 1.0299573382149235e-07,
1770
+ "logits/chosen": 1.4340091943740845,
1771
+ "logits/rejected": 1.2151674032211304,
1772
+ "logps/chosen": -3169.663330078125,
1773
+ "logps/rejected": -4115.5751953125,
1774
+ "loss": 0.0219,
1775
+ "rewards/accuracies": 0.984375,
1776
+ "rewards/chosen": 11.765824317932129,
1777
+ "rewards/margins": 178.14181518554688,
1778
+ "rewards/rejected": -166.37596130371094,
1779
+ "step": 118
1780
+ },
1781
+ {
1782
+ "epoch": 2.6516853932584272,
1783
+ "grad_norm": 0.11178380995988846,
1784
+ "learning_rate": 8.894959999345015e-08,
1785
+ "logits/chosen": 1.4085586071014404,
1786
+ "logits/rejected": 1.317073941230774,
1787
+ "logps/chosen": -2706.8623046875,
1788
+ "logps/rejected": -3629.9091796875,
1789
+ "loss": 0.0109,
1790
+ "rewards/accuracies": 1.0,
1791
+ "rewards/chosen": 6.750637531280518,
1792
+ "rewards/margins": 140.9330291748047,
1793
+ "rewards/rejected": -134.18240356445312,
1794
+ "step": 119
1795
+ },
1796
+ {
1797
+ "epoch": 2.6741573033707864,
1798
+ "grad_norm": 0.009486271999776363,
1799
+ "learning_rate": 7.590358729742808e-08,
1800
+ "logits/chosen": 1.5044245719909668,
1801
+ "logits/rejected": 1.3787866830825806,
1802
+ "logps/chosen": -2867.752197265625,
1803
+ "logps/rejected": -3833.509765625,
1804
+ "loss": 0.0,
1805
+ "rewards/accuracies": 1.0,
1806
+ "rewards/chosen": 6.230460166931152,
1807
+ "rewards/margins": 134.28904724121094,
1808
+ "rewards/rejected": -128.05857849121094,
1809
+ "step": 120
1810
+ },
1811
+ {
1812
+ "epoch": 2.696629213483146,
1813
+ "grad_norm": 0.009250489063560963,
1814
+ "learning_rate": 6.386694245699181e-08,
1815
+ "logits/chosen": 1.5157657861709595,
1816
+ "logits/rejected": 1.2433254718780518,
1817
+ "logps/chosen": -3022.373046875,
1818
+ "logps/rejected": -3732.22900390625,
1819
+ "loss": 0.0,
1820
+ "rewards/accuracies": 1.0,
1821
+ "rewards/chosen": 2.7557570934295654,
1822
+ "rewards/margins": 130.84677124023438,
1823
+ "rewards/rejected": -128.0910186767578,
1824
+ "step": 121
1825
+ },
1826
+ {
1827
+ "epoch": 2.7191011235955056,
1828
+ "grad_norm": 0.1917319893836975,
1829
+ "learning_rate": 5.284819677822611e-08,
1830
+ "logits/chosen": 1.6072005033493042,
1831
+ "logits/rejected": 1.528849720954895,
1832
+ "logps/chosen": -2894.672119140625,
1833
+ "logps/rejected": -3495.853515625,
1834
+ "loss": 0.0108,
1835
+ "rewards/accuracies": 0.984375,
1836
+ "rewards/chosen": 3.3133740425109863,
1837
+ "rewards/margins": 105.75206756591797,
1838
+ "rewards/rejected": -102.43870544433594,
1839
+ "step": 122
1840
+ },
1841
+ {
1842
+ "epoch": 2.741573033707865,
1843
+ "grad_norm": 0.03384300321340561,
1844
+ "learning_rate": 4.285516010293522e-08,
1845
+ "logits/chosen": 1.4517195224761963,
1846
+ "logits/rejected": 1.3014264106750488,
1847
+ "logps/chosen": -2851.070556640625,
1848
+ "logps/rejected": -3593.665771484375,
1849
+ "loss": 0.0,
1850
+ "rewards/accuracies": 1.0,
1851
+ "rewards/chosen": 7.4544267654418945,
1852
+ "rewards/margins": 122.42274475097656,
1853
+ "rewards/rejected": -114.96832275390625,
1854
+ "step": 123
1855
+ },
1856
+ {
1857
+ "epoch": 2.764044943820225,
1858
+ "grad_norm": 0.24889694154262543,
1859
+ "learning_rate": 3.389491527319999e-08,
1860
+ "logits/chosen": 1.4583051204681396,
1861
+ "logits/rejected": 1.2614139318466187,
1862
+ "logps/chosen": -2827.8134765625,
1863
+ "logps/rejected": -3561.30810546875,
1864
+ "loss": 0.0217,
1865
+ "rewards/accuracies": 0.984375,
1866
+ "rewards/chosen": 0.6058197617530823,
1867
+ "rewards/margins": 129.5867919921875,
1868
+ "rewards/rejected": -128.98095703125,
1869
+ "step": 124
1870
+ },
1871
+ {
1872
+ "epoch": 2.7865168539325844,
1873
+ "grad_norm": 0.06888113170862198,
1874
+ "learning_rate": 2.5973813111218548e-08,
1875
+ "logits/chosen": 1.529250144958496,
1876
+ "logits/rejected": 1.247063159942627,
1877
+ "logps/chosen": -2882.323974609375,
1878
+ "logps/rejected": -3656.96044921875,
1879
+ "loss": 0.0002,
1880
+ "rewards/accuracies": 1.0,
1881
+ "rewards/chosen": 9.58204174041748,
1882
+ "rewards/margins": 154.1719970703125,
1883
+ "rewards/rejected": -144.5899658203125,
1884
+ "step": 125
1885
+ },
1886
+ {
1887
+ "epoch": 2.808988764044944,
1888
+ "grad_norm": 0.0029755791183561087,
1889
+ "learning_rate": 1.909746791798317e-08,
1890
+ "logits/chosen": 1.4555425643920898,
1891
+ "logits/rejected": 1.2920844554901123,
1892
+ "logps/chosen": -2807.64208984375,
1893
+ "logps/rejected": -3475.54931640625,
1894
+ "loss": 0.0217,
1895
+ "rewards/accuracies": 0.984375,
1896
+ "rewards/chosen": 5.643215179443359,
1897
+ "rewards/margins": 125.7391128540039,
1898
+ "rewards/rejected": -120.09590148925781,
1899
+ "step": 126
1900
+ },
1901
+ {
1902
+ "epoch": 2.831460674157303,
1903
+ "grad_norm": 0.009821542538702488,
1904
+ "learning_rate": 1.3270753493989374e-08,
1905
+ "logits/chosen": 1.535863995552063,
1906
+ "logits/rejected": 1.3580735921859741,
1907
+ "logps/chosen": -2754.88818359375,
1908
+ "logps/rejected": -3732.697021484375,
1909
+ "loss": 0.0,
1910
+ "rewards/accuracies": 1.0,
1911
+ "rewards/chosen": 7.623423099517822,
1912
+ "rewards/margins": 136.6768035888672,
1913
+ "rewards/rejected": -129.05337524414062,
1914
+ "step": 127
1915
+ },
1916
+ {
1917
+ "epoch": 2.853932584269663,
1918
+ "grad_norm": 0.5018057227134705,
1919
+ "learning_rate": 8.49779968479436e-09,
1920
+ "logits/chosen": 1.3728063106536865,
1921
+ "logits/rejected": 1.154386281967163,
1922
+ "logps/chosen": -3219.5546875,
1923
+ "logps/rejected": -3955.0615234375,
1924
+ "loss": 0.0108,
1925
+ "rewards/accuracies": 1.0,
1926
+ "rewards/chosen": 2.1031904220581055,
1927
+ "rewards/margins": 135.11688232421875,
1928
+ "rewards/rejected": -133.01368713378906,
1929
+ "step": 128
1930
+ },
1931
+ {
1932
+ "epoch": 2.8764044943820224,
1933
+ "grad_norm": 0.0029928251169621944,
1934
+ "learning_rate": 4.781989453874814e-09,
1935
+ "logits/chosen": 1.589327335357666,
1936
+ "logits/rejected": 1.44749116897583,
1937
+ "logps/chosen": -2659.24462890625,
1938
+ "logps/rejected": -3233.244873046875,
1939
+ "loss": 0.0,
1940
+ "rewards/accuracies": 1.0,
1941
+ "rewards/chosen": 10.386514663696289,
1942
+ "rewards/margins": 102.26481628417969,
1943
+ "rewards/rejected": -91.87830352783203,
1944
+ "step": 129
1945
+ },
1946
+ {
1947
+ "epoch": 2.898876404494382,
1948
+ "grad_norm": 0.009541017934679985,
1949
+ "learning_rate": 2.1259564848570834e-09,
1950
+ "logits/chosen": 1.5677722692489624,
1951
+ "logits/rejected": 1.2758667469024658,
1952
+ "logps/chosen": -2889.547607421875,
1953
+ "logps/rejected": -3603.37109375,
1954
+ "loss": 0.0,
1955
+ "rewards/accuracies": 1.0,
1956
+ "rewards/chosen": 15.972006797790527,
1957
+ "rewards/margins": 140.3019256591797,
1958
+ "rewards/rejected": -124.32991790771484,
1959
+ "step": 130
1960
+ },
1961
+ {
1962
+ "epoch": 2.9213483146067416,
1963
+ "grad_norm": 0.007502752356231213,
1964
+ "learning_rate": 5.315833148210603e-10,
1965
+ "logits/chosen": 1.6323837041854858,
1966
+ "logits/rejected": 1.446678876876831,
1967
+ "logps/chosen": -2922.07568359375,
1968
+ "logps/rejected": -3691.432373046875,
1969
+ "loss": 0.0108,
1970
+ "rewards/accuracies": 0.984375,
1971
+ "rewards/chosen": 12.317670822143555,
1972
+ "rewards/margins": 135.18690490722656,
1973
+ "rewards/rejected": -122.86924743652344,
1974
+ "step": 131
1975
+ },
1976
+ {
1977
+ "epoch": 2.943820224719101,
1978
+ "grad_norm": 0.2958358824253082,
1979
+ "learning_rate": 0.0,
1980
+ "logits/chosen": 1.4742579460144043,
1981
+ "logits/rejected": 1.2774202823638916,
1982
+ "logps/chosen": -2621.55615234375,
1983
+ "logps/rejected": -3527.73193359375,
1984
+ "loss": 0.0217,
1985
+ "rewards/accuracies": 0.984375,
1986
+ "rewards/chosen": 11.16303539276123,
1987
+ "rewards/margins": 133.13824462890625,
1988
+ "rewards/rejected": -121.9752197265625,
1989
+ "step": 132
1990
+ },
1991
+ {
1992
+ "epoch": 2.943820224719101,
1993
+ "step": 132,
1994
+ "total_flos": 228521444442112.0,
1995
+ "train_loss": 0.17045999738028772,
1996
+ "train_runtime": 5166.54,
1997
+ "train_samples_per_second": 1.651,
1998
+ "train_steps_per_second": 0.026
1999
+ }
2000
+ ],
2001
+ "logging_steps": 1,
2002
+ "max_steps": 132,
2003
+ "num_input_tokens_seen": 0,
2004
+ "num_train_epochs": 3,
2005
+ "save_steps": 50,
2006
+ "stateful_callbacks": {
2007
+ "TrainerControl": {
2008
+ "args": {
2009
+ "should_epoch_stop": false,
2010
+ "should_evaluate": false,
2011
+ "should_log": false,
2012
+ "should_save": true,
2013
+ "should_training_stop": true
2014
+ },
2015
+ "attributes": {}
2016
+ }
2017
+ },
2018
+ "total_flos": 228521444442112.0,
2019
+ "train_batch_size": 1,
2020
+ "trial_name": null,
2021
+ "trial_params": null
2022
+ }
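The `log_history` array above holds one entry per optimizer step, and the `training_loss.png` and `training_rewards_accuracies.png` files added in this commit plot those same fields. Below is a minimal sketch (not part of the commit) of how such curves could be regenerated from `trainer_state.json`; it assumes the file has been downloaded locally, and the output filename `training_curves.png` is arbitrary.

```python
import json

import matplotlib.pyplot as plt

# Assumes trainer_state.json from this repo is in the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step entries; the final summary entry has no "loss" key.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
loss = [e["loss"] for e in logs]
accuracies = [e["rewards/accuracies"] for e in logs]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, loss)
ax1.set(xlabel="step", ylabel="DPO loss")
ax2.plot(steps, accuracies)
ax2.set(xlabel="step", ylabel="rewards/accuracies")
fig.tight_layout()
fig.savefig("training_curves.png")
```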
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28e6e2c40272fe7a35e1549423d44c7e15759729ea9b1e95eb6bce29915e1932
+ size 7800
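`training_args.bin` is stored as an LFS pointer; `transformers.Trainer` saves it with `torch.save`, so a minimal sketch for inspecting it is below. Assumption: the file is the pickled `TrainingArguments` object from this run, and `weights_only=False` is needed because it is a full Python object rather than a tensor state dict.

```python
import torch

# Load the pickled TrainingArguments (only do this for files you trust).
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```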
training_loss.png ADDED
training_rewards_accuracies.png ADDED
vocab.json ADDED
The diff for this file is too large to render. See raw diff