{
"best_metric": 2.8705949783325195,
"best_model_checkpoint": "miner_id_24/checkpoint-500",
"epoch": 0.4317789291882556,
"eval_steps": 50,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008635578583765112,
"eval_loss": 4.056758880615234,
"eval_runtime": 6.4154,
"eval_samples_per_second": 76.067,
"eval_steps_per_second": 19.017,
"step": 1
},
{
"epoch": 0.008635578583765112,
"grad_norm": 109.97903442382812,
"learning_rate": 4.0400000000000006e-05,
"loss": 7.1856,
"step": 10
},
{
"epoch": 0.017271157167530225,
"grad_norm": 128.83065795898438,
"learning_rate": 8.080000000000001e-05,
"loss": 7.2879,
"step": 20
},
{
"epoch": 0.025906735751295335,
"grad_norm": 156.83480834960938,
"learning_rate": 0.00012119999999999999,
"loss": 6.7848,
"step": 30
},
{
"epoch": 0.03454231433506045,
"grad_norm": 94.2466812133789,
"learning_rate": 0.00016160000000000002,
"loss": 6.6317,
"step": 40
},
{
"epoch": 0.04317789291882556,
"grad_norm": 69.40169525146484,
"learning_rate": 0.000202,
"loss": 7.2513,
"step": 50
},
{
"epoch": 0.04317789291882556,
"eval_loss": 3.4254961013793945,
"eval_runtime": 6.4823,
"eval_samples_per_second": 75.282,
"eval_steps_per_second": 18.82,
"step": 50
},
{
"epoch": 0.05181347150259067,
"grad_norm": 69.7632064819336,
"learning_rate": 0.00020175396907624226,
"loss": 6.6028,
"step": 60
},
{
"epoch": 0.06044905008635579,
"grad_norm": 122.16877746582031,
"learning_rate": 0.0002010170749428986,
"loss": 5.9878,
"step": 70
},
{
"epoch": 0.0690846286701209,
"grad_norm": 137.6359100341797,
"learning_rate": 0.00019979290767411438,
"loss": 5.9063,
"step": 80
},
{
"epoch": 0.07772020725388601,
"grad_norm": 351.2722473144531,
"learning_rate": 0.0001980874312897702,
"loss": 6.7379,
"step": 90
},
{
"epoch": 0.08635578583765112,
"grad_norm": 46.962703704833984,
"learning_rate": 0.00019590895469937675,
"loss": 7.5918,
"step": 100
},
{
"epoch": 0.08635578583765112,
"eval_loss": 3.554054021835327,
"eval_runtime": 6.3322,
"eval_samples_per_second": 77.067,
"eval_steps_per_second": 19.267,
"step": 100
},
{
"epoch": 0.09499136442141623,
"grad_norm": 75.2490234375,
"learning_rate": 0.0001932680912219027,
"loss": 6.5258,
"step": 110
},
{
"epoch": 0.10362694300518134,
"grad_norm": 60.61024856567383,
"learning_rate": 0.00019017770687875164,
"loss": 6.6532,
"step": 120
},
{
"epoch": 0.11226252158894647,
"grad_norm": 64.55341339111328,
"learning_rate": 0.000186652857711799,
"loss": 6.1697,
"step": 130
},
{
"epoch": 0.12089810017271158,
"grad_norm": 98.4996566772461,
"learning_rate": 0.00018271071643186968,
"loss": 6.6974,
"step": 140
},
{
"epoch": 0.12953367875647667,
"grad_norm": 34.625057220458984,
"learning_rate": 0.00017837048875501678,
"loss": 6.6976,
"step": 150
},
{
"epoch": 0.12953367875647667,
"eval_loss": 3.2074270248413086,
"eval_runtime": 6.4564,
"eval_samples_per_second": 75.583,
"eval_steps_per_second": 18.896,
"step": 150
},
{
"epoch": 0.1381692573402418,
"grad_norm": 51.142398834228516,
"learning_rate": 0.00017365331983420376,
"loss": 6.4538,
"step": 160
},
{
"epoch": 0.14680483592400692,
"grad_norm": 60.982696533203125,
"learning_rate": 0.0001685821912422447,
"loss": 6.1183,
"step": 170
},
{
"epoch": 0.15544041450777202,
"grad_norm": 90.77964782714844,
"learning_rate": 0.00016318180900789148,
"loss": 6.2612,
"step": 180
},
{
"epoch": 0.16407599309153714,
"grad_norm": 37.461490631103516,
"learning_rate": 0.00015747848325054544,
"loss": 6.2806,
"step": 190
},
{
"epoch": 0.17271157167530224,
"grad_norm": 57.53730392456055,
"learning_rate": 0.0001515,
"loss": 6.4748,
"step": 200
},
{
"epoch": 0.17271157167530224,
"eval_loss": 3.1944408416748047,
"eval_runtime": 6.5289,
"eval_samples_per_second": 74.744,
"eval_steps_per_second": 18.686,
"step": 200
},
{
"epoch": 0.18134715025906736,
"grad_norm": 26.914474487304688,
"learning_rate": 0.00014527548582569683,
"loss": 6.1732,
"step": 210
},
{
"epoch": 0.18998272884283246,
"grad_norm": 42.907127380371094,
"learning_rate": 0.00013883526593500714,
"loss": 6.1321,
"step": 220
},
{
"epoch": 0.19861830742659758,
"grad_norm": 61.954986572265625,
"learning_rate": 0.0001322107164318697,
"loss": 6.293,
"step": 230
},
{
"epoch": 0.20725388601036268,
"grad_norm": 41.956809997558594,
"learning_rate": 0.00012543411145556643,
"loss": 6.3759,
"step": 240
},
{
"epoch": 0.2158894645941278,
"grad_norm": 58.70474624633789,
"learning_rate": 0.00011853846594435998,
"loss": 6.4753,
"step": 250
},
{
"epoch": 0.2158894645941278,
"eval_loss": 3.325136184692383,
"eval_runtime": 6.5343,
"eval_samples_per_second": 74.683,
"eval_steps_per_second": 18.671,
"step": 250
},
{
"epoch": 0.22452504317789293,
"grad_norm": 37.147239685058594,
"learning_rate": 0.00011155737479003301,
"loss": 6.4968,
"step": 260
},
{
"epoch": 0.23316062176165803,
"grad_norm": 28.960235595703125,
"learning_rate": 0.00010452484916695262,
"loss": 6.153,
"step": 270
},
{
"epoch": 0.24179620034542315,
"grad_norm": 50.646121978759766,
"learning_rate": 9.747515083304742e-05,
"loss": 6.4405,
"step": 280
},
{
"epoch": 0.2504317789291883,
"grad_norm": 61.160247802734375,
"learning_rate": 9.044262520996702e-05,
"loss": 5.6587,
"step": 290
},
{
"epoch": 0.25906735751295334,
"grad_norm": 126.3609848022461,
"learning_rate": 8.346153405564004e-05,
"loss": 6.0805,
"step": 300
},
{
"epoch": 0.25906735751295334,
"eval_loss": 3.0865094661712646,
"eval_runtime": 6.5059,
"eval_samples_per_second": 75.008,
"eval_steps_per_second": 18.752,
"step": 300
},
{
"epoch": 0.26770293609671847,
"grad_norm": 107.83584594726562,
"learning_rate": 7.656588854443357e-05,
"loss": 6.5431,
"step": 310
},
{
"epoch": 0.2763385146804836,
"grad_norm": 63.63986587524414,
"learning_rate": 6.978928356813031e-05,
"loss": 6.3199,
"step": 320
},
{
"epoch": 0.2849740932642487,
"grad_norm": 61.55841064453125,
"learning_rate": 6.316473406499288e-05,
"loss": 6.3271,
"step": 330
},
{
"epoch": 0.29360967184801384,
"grad_norm": 59.812896728515625,
"learning_rate": 5.672451417430317e-05,
"loss": 6.2607,
"step": 340
},
{
"epoch": 0.3022452504317789,
"grad_norm": 81.45887756347656,
"learning_rate": 5.050000000000002e-05,
"loss": 6.3402,
"step": 350
},
{
"epoch": 0.3022452504317789,
"eval_loss": 3.0198187828063965,
"eval_runtime": 6.5239,
"eval_samples_per_second": 74.802,
"eval_steps_per_second": 18.7,
"step": 350
},
{
"epoch": 0.31088082901554404,
"grad_norm": 37.4752082824707,
"learning_rate": 4.452151674945458e-05,
"loss": 6.201,
"step": 360
},
{
"epoch": 0.31951640759930916,
"grad_norm": 46.92196273803711,
"learning_rate": 3.8818190992108515e-05,
"loss": 5.6539,
"step": 370
},
{
"epoch": 0.3281519861830743,
"grad_norm": 111.72332763671875,
"learning_rate": 3.3417808757755355e-05,
"loss": 6.1284,
"step": 380
},
{
"epoch": 0.33678756476683935,
"grad_norm": 140.5625,
"learning_rate": 2.8346680165796253e-05,
"loss": 6.0232,
"step": 390
},
{
"epoch": 0.3454231433506045,
"grad_norm": 49.17171096801758,
"learning_rate": 2.362951124498323e-05,
"loss": 6.0123,
"step": 400
},
{
"epoch": 0.3454231433506045,
"eval_loss": 2.9118363857269287,
"eval_runtime": 6.5191,
"eval_samples_per_second": 74.857,
"eval_steps_per_second": 18.714,
"step": 400
},
{
"epoch": 0.3540587219343696,
"grad_norm": 94.65570068359375,
"learning_rate": 1.928928356813032e-05,
"loss": 5.9852,
"step": 410
},
{
"epoch": 0.3626943005181347,
"grad_norm": 134.4743194580078,
"learning_rate": 1.5347142288200977e-05,
"loss": 5.8302,
"step": 420
},
{
"epoch": 0.37132987910189985,
"grad_norm": 459.3276672363281,
"learning_rate": 1.1822293121248375e-05,
"loss": 5.3816,
"step": 430
},
{
"epoch": 0.3799654576856649,
"grad_norm": 118.11572265625,
"learning_rate": 8.731908778097302e-06,
"loss": 5.7221,
"step": 440
},
{
"epoch": 0.38860103626943004,
"grad_norm": 46.101783752441406,
"learning_rate": 6.09104530062326e-06,
"loss": 5.6217,
"step": 450
},
{
"epoch": 0.38860103626943004,
"eval_loss": 2.882382869720459,
"eval_runtime": 6.5287,
"eval_samples_per_second": 74.746,
"eval_steps_per_second": 18.687,
"step": 450
},
{
"epoch": 0.39723661485319517,
"grad_norm": 48.339908599853516,
"learning_rate": 3.912568710229791e-06,
"loss": 6.0256,
"step": 460
},
{
"epoch": 0.4058721934369603,
"grad_norm": 83.55408477783203,
"learning_rate": 2.2070923258856255e-06,
"loss": 5.9578,
"step": 470
},
{
"epoch": 0.41450777202072536,
"grad_norm": 55.561649322509766,
"learning_rate": 9.829250571013935e-07,
"loss": 5.9324,
"step": 480
},
{
"epoch": 0.4231433506044905,
"grad_norm": 66.6230239868164,
"learning_rate": 2.4603092375775605e-07,
"loss": 5.6569,
"step": 490
},
{
"epoch": 0.4317789291882556,
"grad_norm": 116.90052032470703,
"learning_rate": 0.0,
"loss": 6.1575,
"step": 500
},
{
"epoch": 0.4317789291882556,
"eval_loss": 2.8705949783325195,
"eval_runtime": 6.5205,
"eval_samples_per_second": 74.841,
"eval_steps_per_second": 18.71,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4657011228672000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}