sm committed
Commit 4bb1b97 · verified · Parent: 12bb7e2

Training in progress, step 46, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:89f182b81f638f8ef69ce66e93af01d395057cd24d2aab33b40325ca5c81f3d8
+ oid sha256:f7c6f1084ed3700c3454a2448e5ed2b265c8884bf9fe07a9bc7ea28f5b7e9f11
  size 432223744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:85324199877dfffb7607f7afa8d9b018a4116615c2246884a5654a617519a0c1
+ oid sha256:14b7fc2a6d151125933a48bd6397cd8ad37fe4354457e9cf66b8a391ce3794cf
  size 864785974
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:30daa9a7592cfa20f51b94b4aa708a7dc5cf2aa1ae681035d89e0609a44222f0
+ oid sha256:81741de6e898e1250e5cb590a0eb1c3e7bd6bd24806864d9b483fb4c30bbf825
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e9739a5c84a65a183aaceb019fc9dc20dbfaebb0005316af3f80f4ee56607267
+ oid sha256:19dddc0b6cb2d502dca9d4c1f1d4c7210ecd9950ff974605260df48030e6adc4
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d62fa0bd2b102f25022547f65a7a9db422bb2faff81778ad61c0ad62f909ebdf
+ oid sha256:2b72b376989328d88f7204f8c2450c92ad968c5171aceb25802134dcad1bc175
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35af6438c2a92d9973acad6d80d8a4a33fdacd6d5b73017506a1248bcd26eb07
+ oid sha256:9414d137ecdfe150cd36a229c2ee6d6c2d8e0dc3efcbc20a96e2225ff5299c68
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad4e4728265849a5c8f503e18c55a53f2fb6db704986db6665c2db1ebdbba252
+ oid sha256:9ae5239404ab97e0fcf1d64e94bd56d195792c49faa00afb9e436068b35b4aef
  size 1064
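
Each of the files above is a Git LFS pointer: the repository tracks only a sha256 oid and a byte size, while the actual checkpoint blobs live in LFS storage. A minimal sketch of verifying a locally downloaded copy against the pointer values in this commit; the local path is an assumption, and only the oid and size are taken from the diff above:

```python
import hashlib
import os

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's size and sha256 digest match an LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid/size copied from the adapter_model.safetensors pointer in this commit;
# the path assumes the checkpoint directory was downloaded locally.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "f7c6f1084ed3700c3454a2448e5ed2b265c8884bf9fe07a9bc7ea28f5b7e9f11",
    432223744,
))
```
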
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.19664782285690308,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 1.639344262295082,
+ "epoch": 3.0163934426229506,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 46,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,153 @@
  "eval_samples_per_second": 14.617,
  "eval_steps_per_second": 3.8,
  "step": 25
+ },
+ {
+ "epoch": 1.7049180327868854,
+ "grad_norm": 0.16763406991958618,
+ "learning_rate": 4.859583227770218e-05,
+ "loss": 0.3129,
+ "step": 26
+ },
+ {
+ "epoch": 1.7704918032786885,
+ "grad_norm": 0.10168012976646423,
+ "learning_rate": 4.543456197011605e-05,
+ "loss": 0.1769,
+ "step": 27
+ },
+ {
+ "epoch": 1.8360655737704918,
+ "grad_norm": 0.0870274230837822,
+ "learning_rate": 4.232203494213567e-05,
+ "loss": 0.1408,
+ "step": 28
+ },
+ {
+ "epoch": 1.901639344262295,
+ "grad_norm": 0.12161073088645935,
+ "learning_rate": 3.927411191804058e-05,
+ "loss": 0.206,
+ "step": 29
+ },
+ {
+ "epoch": 1.9672131147540983,
+ "grad_norm": 0.18406201899051666,
+ "learning_rate": 3.630632441491512e-05,
+ "loss": 0.3345,
+ "step": 30
+ },
+ {
+ "epoch": 2.0327868852459017,
+ "grad_norm": 0.2242753803730011,
+ "learning_rate": 3.343379559759746e-05,
+ "loss": 0.3352,
+ "step": 31
+ },
+ {
+ "epoch": 2.098360655737705,
+ "grad_norm": 0.07899916917085648,
+ "learning_rate": 3.067116321449813e-05,
+ "loss": 0.122,
+ "step": 32
+ },
+ {
+ "epoch": 2.1639344262295084,
+ "grad_norm": 0.12275606393814087,
+ "learning_rate": 2.803250500698939e-05,
+ "loss": 0.194,
+ "step": 33
+ },
+ {
+ "epoch": 2.2295081967213113,
+ "grad_norm": 0.13614587485790253,
+ "learning_rate": 2.5531266972462177e-05,
+ "loss": 0.2503,
+ "step": 34
+ },
+ {
+ "epoch": 2.2950819672131146,
+ "grad_norm": 0.11481458693742752,
+ "learning_rate": 2.3180194846605367e-05,
+ "loss": 0.1806,
+ "step": 35
+ },
+ {
+ "epoch": 2.360655737704918,
+ "grad_norm": 0.09330334514379501,
+ "learning_rate": 2.0991269154058385e-05,
+ "loss": 0.1425,
+ "step": 36
+ },
+ {
+ "epoch": 2.4262295081967213,
+ "grad_norm": 0.1448790580034256,
+ "learning_rate": 1.897564415840379e-05,
+ "loss": 0.2288,
+ "step": 37
+ },
+ {
+ "epoch": 2.4918032786885247,
+ "grad_norm": 0.12863922119140625,
+ "learning_rate": 1.7143591022596845e-05,
+ "loss": 0.181,
+ "step": 38
+ },
+ {
+ "epoch": 2.557377049180328,
+ "grad_norm": 0.10539160668849945,
+ "learning_rate": 1.5504445469473496e-05,
+ "loss": 0.1764,
+ "step": 39
+ },
+ {
+ "epoch": 2.6229508196721314,
+ "grad_norm": 0.1047457829117775,
+ "learning_rate": 1.4066560209046673e-05,
+ "loss": 0.1523,
+ "step": 40
+ },
+ {
+ "epoch": 2.6885245901639343,
+ "grad_norm": 0.15245206654071808,
+ "learning_rate": 1.2837262375010731e-05,
+ "loss": 0.2268,
+ "step": 41
+ },
+ {
+ "epoch": 2.7540983606557377,
+ "grad_norm": 0.12327948957681656,
+ "learning_rate": 1.1822816187347623e-05,
+ "loss": 0.1537,
+ "step": 42
+ },
+ {
+ "epoch": 2.819672131147541,
+ "grad_norm": 0.08937690407037735,
+ "learning_rate": 1.1028391031297826e-05,
+ "loss": 0.1227,
+ "step": 43
+ },
+ {
+ "epoch": 2.8852459016393444,
+ "grad_norm": 0.11844687163829803,
+ "learning_rate": 1.0458035115358032e-05,
+ "loss": 0.1708,
+ "step": 44
+ },
+ {
+ "epoch": 2.9508196721311473,
+ "grad_norm": 0.18891659379005432,
+ "learning_rate": 1.0114654842538593e-05,
+ "loss": 0.2713,
+ "step": 45
+ },
+ {
+ "epoch": 3.0163934426229506,
+ "grad_norm": 0.2762884795665741,
+ "learning_rate": 1e-05,
+ "loss": 0.3081,
+ "step": 46
  }
  ],
  "logging_steps": 1,
@@ -221,12 +368,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 3.315690861756416e+17,
+ "total_flos": 6.100871185631805e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null