fedovtt committed
Commit 732a595 · verified · 1 parent: 8f51b99

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:44b5a90cbae015dfc7be5202391501d2928f4f0767384038e1df885dbf47ed45
+oid sha256:750d7d2c026cb4edc0de34edd942498d472ecc7f9ec3e8eb5c04d941861c272e
 size 435773944
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb8d9363a7dfcef3b000a69e27d77c3b61ff00273a6f2f166771555b98f88230
+oid sha256:cba134d01587202cdf24feca111dfe0825e7bc1c2d3e80daf37fa73e0d0c7ba0
 size 222034900
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f58a55fcf967a46936d11fd0124d1ff2bd8c83df24764f5a640a993eb06eec71
+oid sha256:897cf9d004a67877002b3d743e6c35021fd74d92777e133acb701e8c64884078
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5220d0af0ff98b2d6e04334f1c29842da7230747bf3383cf1f883a4d2eb6d4f2
+oid sha256:90c455b2a7603e9003556345290fd0185bd6c6017de2b595e3f9c6216f721e5e
 size 1064
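
The four binary files above (adapter weights, optimizer state, RNG state, scheduler state) are tracked with Git LFS, so only the sha256 `oid` and byte `size` of each pointer change in this commit. As a minimal sketch of how a downloaded object can be checked against its pointer (the helper names `parse_lfs_pointer` and `matches_pointer` are mine, and the path and pointer text are illustrative, copied from the adapter diff above):

```python
# Minimal sketch: check a downloaded checkpoint file against its Git LFS pointer.
# The pointer format is the three-line text shown in the diffs above; the file
# path and pointer contents here are illustrative examples.
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_text: str) -> tuple[str, int]:
    """Return (sha256 hex digest, size in bytes) parsed from an LFS pointer blob."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])


def matches_pointer(file_path: str, pointer_text: str) -> bool:
    """True if the local file's sha256 digest and size both match the pointer."""
    expected_oid, expected_size = parse_lfs_pointer(pointer_text)
    data = Path(file_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid


pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:750d7d2c026cb4edc0de34edd942498d472ecc7f9ec3e8eb5c04d941861c272e\n"
    "size 435773944\n"
)
print(matches_pointer("last-checkpoint/adapter_model.safetensors", pointer))
```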
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.2731058597564697,
-  "best_model_checkpoint": "miner_id_24/checkpoint-150",
-  "epoch": 0.018373346398824104,
+  "best_metric": 1.2610796689987183,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.02449779519843214,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 10.397,
       "eval_steps_per_second": 2.601,
       "step": 150
+    },
+    {
+      "epoch": 0.019598236158745713,
+      "grad_norm": 0.8640761971473694,
+      "learning_rate": 2.2644444444444443e-06,
+      "loss": 0.9152,
+      "step": 160
+    },
+    {
+      "epoch": 0.02082312591866732,
+      "grad_norm": 1.1081897020339966,
+      "learning_rate": 1.6983333333333333e-06,
+      "loss": 1.0269,
+      "step": 170
+    },
+    {
+      "epoch": 0.022048015678588925,
+      "grad_norm": 1.8240900039672852,
+      "learning_rate": 1.1322222222222221e-06,
+      "loss": 1.0471,
+      "step": 180
+    },
+    {
+      "epoch": 0.023272905438510533,
+      "grad_norm": 5.866700649261475,
+      "learning_rate": 5.661111111111111e-07,
+      "loss": 1.4556,
+      "step": 190
+    },
+    {
+      "epoch": 0.02449779519843214,
+      "grad_norm": 24.9132022857666,
+      "learning_rate": 0.0,
+      "loss": 1.7105,
+      "step": 200
+    },
+    {
+      "epoch": 0.02449779519843214,
+      "eval_loss": 1.2610796689987183,
+      "eval_runtime": 247.8876,
+      "eval_samples_per_second": 10.4,
+      "eval_steps_per_second": 2.602,
+      "step": 200
     }
   ],
   "logging_steps": 10,
@@ -167,12 +210,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4.82956662496297e+16,
+  "total_flos": 6.42876087163945e+16,
   "train_batch_size": 6,
   "trial_name": null,
   "trial_params": null