Training in progress, step 500, checkpoint (commit ee6c26c, verified)
{
"best_metric": 0.8017581105232239,
"best_model_checkpoint": "miner_id_24/checkpoint-500",
"epoch": 0.23889154323936931,
"eval_steps": 50,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00047778308647873863,
"eval_loss": 4.253282070159912,
"eval_runtime": 11.3656,
"eval_samples_per_second": 77.603,
"eval_steps_per_second": 19.445,
"step": 1
},
{
"epoch": 0.004777830864787387,
"grad_norm": 83.29377746582031,
"learning_rate": 4.0400000000000006e-05,
"loss": 7.0413,
"step": 10
},
{
"epoch": 0.009555661729574774,
"grad_norm": 136.3715362548828,
"learning_rate": 8.080000000000001e-05,
"loss": 4.7053,
"step": 20
},
{
"epoch": 0.01433349259436216,
"grad_norm": 225.83673095703125,
"learning_rate": 0.00012119999999999999,
"loss": 4.1351,
"step": 30
},
{
"epoch": 0.019111323459149548,
"grad_norm": 87.71796417236328,
"learning_rate": 0.00016160000000000002,
"loss": 3.5961,
"step": 40
},
{
"epoch": 0.023889154323936932,
"grad_norm": 166.2051239013672,
"learning_rate": 0.000202,
"loss": 3.1402,
"step": 50
},
{
"epoch": 0.023889154323936932,
"eval_loss": 1.578347086906433,
"eval_runtime": 11.3459,
"eval_samples_per_second": 77.738,
"eval_steps_per_second": 19.478,
"step": 50
},
{
"epoch": 0.02866698518872432,
"grad_norm": 84.7917251586914,
"learning_rate": 0.00020175396907624226,
"loss": 2.7784,
"step": 60
},
{
"epoch": 0.033444816053511704,
"grad_norm": 64.9135971069336,
"learning_rate": 0.0002010170749428986,
"loss": 2.3735,
"step": 70
},
{
"epoch": 0.038222646918299095,
"grad_norm": 59.275482177734375,
"learning_rate": 0.00019979290767411438,
"loss": 2.4902,
"step": 80
},
{
"epoch": 0.04300047778308648,
"grad_norm": 20.444557189941406,
"learning_rate": 0.0001980874312897702,
"loss": 3.0005,
"step": 90
},
{
"epoch": 0.047778308647873864,
"grad_norm": 21.86193084716797,
"learning_rate": 0.00019590895469937675,
"loss": 2.985,
"step": 100
},
{
"epoch": 0.047778308647873864,
"eval_loss": 1.178714632987976,
"eval_runtime": 11.6295,
"eval_samples_per_second": 75.841,
"eval_steps_per_second": 19.003,
"step": 100
},
{
"epoch": 0.05255613951266125,
"grad_norm": 34.27466583251953,
"learning_rate": 0.0001932680912219027,
"loss": 1.8414,
"step": 110
},
{
"epoch": 0.05733397037744864,
"grad_norm": 19.224571228027344,
"learning_rate": 0.00019017770687875164,
"loss": 1.6868,
"step": 120
},
{
"epoch": 0.062111801242236024,
"grad_norm": 18.39223861694336,
"learning_rate": 0.000186652857711799,
"loss": 2.1108,
"step": 130
},
{
"epoch": 0.06688963210702341,
"grad_norm": 28.29250717163086,
"learning_rate": 0.00018271071643186968,
"loss": 2.3566,
"step": 140
},
{
"epoch": 0.0716674629718108,
"grad_norm": 21.852407455444336,
"learning_rate": 0.00017837048875501678,
"loss": 2.6554,
"step": 150
},
{
"epoch": 0.0716674629718108,
"eval_loss": 1.1248341798782349,
"eval_runtime": 11.4044,
"eval_samples_per_second": 77.338,
"eval_steps_per_second": 19.378,
"step": 150
},
{
"epoch": 0.07644529383659819,
"grad_norm": 36.45707321166992,
"learning_rate": 0.00017365331983420376,
"loss": 1.5446,
"step": 160
},
{
"epoch": 0.08122312470138557,
"grad_norm": 39.37006378173828,
"learning_rate": 0.0001685821912422447,
"loss": 1.5816,
"step": 170
},
{
"epoch": 0.08600095556617296,
"grad_norm": 52.60810470581055,
"learning_rate": 0.00016318180900789148,
"loss": 2.17,
"step": 180
},
{
"epoch": 0.09077878643096035,
"grad_norm": 85.5344467163086,
"learning_rate": 0.00015747848325054544,
"loss": 8.2239,
"step": 190
},
{
"epoch": 0.09555661729574773,
"grad_norm": 43.30970001220703,
"learning_rate": 0.0001515,
"loss": 3.5819,
"step": 200
},
{
"epoch": 0.09555661729574773,
"eval_loss": 1.198967695236206,
"eval_runtime": 11.4576,
"eval_samples_per_second": 76.979,
"eval_steps_per_second": 19.289,
"step": 200
},
{
"epoch": 0.10033444816053512,
"grad_norm": 17.08075523376465,
"learning_rate": 0.00014527548582569683,
"loss": 2.0191,
"step": 210
},
{
"epoch": 0.1051122790253225,
"grad_norm": 24.818105697631836,
"learning_rate": 0.00013883526593500714,
"loss": 2.0323,
"step": 220
},
{
"epoch": 0.10989010989010989,
"grad_norm": 20.862939834594727,
"learning_rate": 0.0001322107164318697,
"loss": 2.0984,
"step": 230
},
{
"epoch": 0.11466794075489728,
"grad_norm": 14.932770729064941,
"learning_rate": 0.00012543411145556643,
"loss": 2.1092,
"step": 240
},
{
"epoch": 0.11944577161968466,
"grad_norm": 18.728755950927734,
"learning_rate": 0.00011853846594435998,
"loss": 2.4072,
"step": 250
},
{
"epoch": 0.11944577161968466,
"eval_loss": 1.1132290363311768,
"eval_runtime": 11.4625,
"eval_samples_per_second": 76.947,
"eval_steps_per_second": 19.28,
"step": 250
},
{
"epoch": 0.12422360248447205,
"grad_norm": 14.004402160644531,
"learning_rate": 0.00011155737479003301,
"loss": 1.5328,
"step": 260
},
{
"epoch": 0.12900143334925943,
"grad_norm": 15.66530704498291,
"learning_rate": 0.00010452484916695262,
"loss": 1.4901,
"step": 270
},
{
"epoch": 0.13377926421404682,
"grad_norm": 12.79644775390625,
"learning_rate": 9.747515083304742e-05,
"loss": 1.7414,
"step": 280
},
{
"epoch": 0.1385570950788342,
"grad_norm": 9.151662826538086,
"learning_rate": 9.044262520996702e-05,
"loss": 1.9799,
"step": 290
},
{
"epoch": 0.1433349259436216,
"grad_norm": 23.121509552001953,
"learning_rate": 8.346153405564004e-05,
"loss": 2.1323,
"step": 300
},
{
"epoch": 0.1433349259436216,
"eval_loss": 0.9465628862380981,
"eval_runtime": 11.3797,
"eval_samples_per_second": 77.506,
"eval_steps_per_second": 19.421,
"step": 300
},
{
"epoch": 0.148112756808409,
"grad_norm": 7.415276050567627,
"learning_rate": 7.656588854443357e-05,
"loss": 1.2979,
"step": 310
},
{
"epoch": 0.15289058767319638,
"grad_norm": 16.202434539794922,
"learning_rate": 6.978928356813031e-05,
"loss": 1.3666,
"step": 320
},
{
"epoch": 0.15766841853798375,
"grad_norm": 11.659475326538086,
"learning_rate": 6.316473406499288e-05,
"loss": 1.9387,
"step": 330
},
{
"epoch": 0.16244624940277114,
"grad_norm": 9.968153953552246,
"learning_rate": 5.672451417430317e-05,
"loss": 2.0627,
"step": 340
},
{
"epoch": 0.16722408026755853,
"grad_norm": 7.878597736358643,
"learning_rate": 5.050000000000002e-05,
"loss": 2.3506,
"step": 350
},
{
"epoch": 0.16722408026755853,
"eval_loss": 0.8945831656455994,
"eval_runtime": 11.4264,
"eval_samples_per_second": 77.189,
"eval_steps_per_second": 19.341,
"step": 350
},
{
"epoch": 0.17200191113234592,
"grad_norm": 6.64923620223999,
"learning_rate": 4.452151674945458e-05,
"loss": 1.3763,
"step": 360
},
{
"epoch": 0.1767797419971333,
"grad_norm": 9.263545989990234,
"learning_rate": 3.8818190992108515e-05,
"loss": 1.5087,
"step": 370
},
{
"epoch": 0.1815575728619207,
"grad_norm": 7.458861351013184,
"learning_rate": 3.3417808757755355e-05,
"loss": 1.6117,
"step": 380
},
{
"epoch": 0.18633540372670807,
"grad_norm": 21.572065353393555,
"learning_rate": 2.8346680165796253e-05,
"loss": 1.7446,
"step": 390
},
{
"epoch": 0.19111323459149546,
"grad_norm": 25.700347900390625,
"learning_rate": 2.362951124498323e-05,
"loss": 2.0516,
"step": 400
},
{
"epoch": 0.19111323459149546,
"eval_loss": 0.8264233469963074,
"eval_runtime": 11.443,
"eval_samples_per_second": 77.078,
"eval_steps_per_second": 19.313,
"step": 400
},
{
"epoch": 0.19589106545628285,
"grad_norm": 8.18686294555664,
"learning_rate": 1.928928356813032e-05,
"loss": 1.0687,
"step": 410
},
{
"epoch": 0.20066889632107024,
"grad_norm": 10.289799690246582,
"learning_rate": 1.5347142288200977e-05,
"loss": 1.4437,
"step": 420
},
{
"epoch": 0.20544672718585763,
"grad_norm": 12.29814338684082,
"learning_rate": 1.1822293121248375e-05,
"loss": 1.6965,
"step": 430
},
{
"epoch": 0.210224558050645,
"grad_norm": 15.640246391296387,
"learning_rate": 8.731908778097302e-06,
"loss": 1.8951,
"step": 440
},
{
"epoch": 0.21500238891543239,
"grad_norm": 7.856279373168945,
"learning_rate": 6.09104530062326e-06,
"loss": 1.9909,
"step": 450
},
{
"epoch": 0.21500238891543239,
"eval_loss": 0.8030063509941101,
"eval_runtime": 11.7799,
"eval_samples_per_second": 74.874,
"eval_steps_per_second": 18.761,
"step": 450
},
{
"epoch": 0.21978021978021978,
"grad_norm": 7.895440578460693,
"learning_rate": 3.912568710229791e-06,
"loss": 1.0214,
"step": 460
},
{
"epoch": 0.22455805064500717,
"grad_norm": 7.843092918395996,
"learning_rate": 2.2070923258856255e-06,
"loss": 1.2448,
"step": 470
},
{
"epoch": 0.22933588150979456,
"grad_norm": 7.881674766540527,
"learning_rate": 9.829250571013935e-07,
"loss": 1.603,
"step": 480
},
{
"epoch": 0.23411371237458195,
"grad_norm": 11.434523582458496,
"learning_rate": 2.4603092375775605e-07,
"loss": 1.8552,
"step": 490
},
{
"epoch": 0.23889154323936931,
"grad_norm": 9.036798477172852,
"learning_rate": 0.0,
"loss": 2.0245,
"step": 500
},
{
"epoch": 0.23889154323936931,
"eval_loss": 0.8017581105232239,
"eval_runtime": 11.7902,
"eval_samples_per_second": 74.808,
"eval_steps_per_second": 18.744,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4657011228672000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
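
The JSON above is the checkpoint's trainer_state.json as written by the Hugging Face Trainer. Below is a minimal sketch of how one might load it and print the logged training and evaluation curves; the file path is an assumption based on the "best_model_checkpoint" field, and the script only reads fields that actually appear in this file (step, loss, learning_rate, eval_loss, best_metric).

```python
import json

# Assumed location of the trainer_state.json shown above,
# following the "best_model_checkpoint" path in the file.
STATE_PATH = "miner_id_24/checkpoint-500/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Each log_history entry is either a training log (has "loss")
# or an evaluation log (has "eval_loss"); both carry "step".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# In this file best_metric matches the eval_loss at step 500.
print(f"best metric: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")

print("\nstep  train_loss  lr")
for e in train_logs:
    print(f"{e['step']:>4}  {e['loss']:>10.4f}  {e['learning_rate']:.2e}")

print("\nstep  eval_loss")
for e in eval_logs:
    print(f"{e['step']:>4}  {e['eval_loss']:>9.4f}")
```

Run against this checkpoint, the evaluation table reproduces the eval_loss trajectory logged every 50 steps, from 4.2533 at step 1 down to 0.8018 at step 500.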