{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.05959475566150179,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011918951132300357,
"grad_norm": 2.9031435226589246,
"learning_rate": 2e-05,
"loss": 2.1072,
"step": 1
},
{
"epoch": 0.0023837902264600714,
"grad_norm": 2.7150248182983923,
"learning_rate": 1.9979453927503366e-05,
"loss": 2.101,
"step": 2
},
{
"epoch": 0.003575685339690107,
"grad_norm": 1.6481585515596062,
"learning_rate": 1.991790013823246e-05,
"loss": 2.0284,
"step": 3
},
{
"epoch": 0.004767580452920143,
"grad_norm": 2.290158944264602,
"learning_rate": 1.9815591569910654e-05,
"loss": 2.0343,
"step": 4
},
{
"epoch": 0.0059594755661501785,
"grad_norm": 1.620010410362664,
"learning_rate": 1.9672948630390296e-05,
"loss": 2.022,
"step": 5
},
{
"epoch": 0.007151370679380214,
"grad_norm": 1.2963689571694739,
"learning_rate": 1.949055747010669e-05,
"loss": 2.0615,
"step": 6
},
{
"epoch": 0.00834326579261025,
"grad_norm": 1.1986327536123562,
"learning_rate": 1.926916757346022e-05,
"loss": 2.0193,
"step": 7
},
{
"epoch": 0.009535160905840286,
"grad_norm": 0.7287087756055839,
"learning_rate": 1.900968867902419e-05,
"loss": 1.9835,
"step": 8
},
{
"epoch": 0.010727056019070322,
"grad_norm": 0.7584838316893223,
"learning_rate": 1.8713187041233896e-05,
"loss": 2.0048,
"step": 9
},
{
"epoch": 0.011918951132300357,
"grad_norm": 0.6198276917269713,
"learning_rate": 1.8380881048918406e-05,
"loss": 1.9825,
"step": 10
},
{
"epoch": 0.013110846245530394,
"grad_norm": 0.46373328707102957,
"learning_rate": 1.8014136218679566e-05,
"loss": 1.9862,
"step": 11
},
{
"epoch": 0.014302741358760428,
"grad_norm": 0.5577755717812845,
"learning_rate": 1.7614459583691346e-05,
"loss": 1.9776,
"step": 12
},
{
"epoch": 0.015494636471990465,
"grad_norm": 0.5939670606164611,
"learning_rate": 1.7183493500977277e-05,
"loss": 1.9674,
"step": 13
},
{
"epoch": 0.0166865315852205,
"grad_norm": 0.4628705732184792,
"learning_rate": 1.672300890261317e-05,
"loss": 1.9734,
"step": 14
},
{
"epoch": 0.017878426698450536,
"grad_norm": 0.485766773190629,
"learning_rate": 1.6234898018587336e-05,
"loss": 1.9846,
"step": 15
},
{
"epoch": 0.01907032181168057,
"grad_norm": 0.5223550727317597,
"learning_rate": 1.5721166601221697e-05,
"loss": 1.9647,
"step": 16
},
{
"epoch": 0.02026221692491061,
"grad_norm": 0.4433368830898489,
"learning_rate": 1.5183925683105254e-05,
"loss": 1.9842,
"step": 17
},
{
"epoch": 0.021454112038140644,
"grad_norm": 0.3532051409411447,
"learning_rate": 1.4625382902408356e-05,
"loss": 1.9792,
"step": 18
},
{
"epoch": 0.02264600715137068,
"grad_norm": 0.3750713272673884,
"learning_rate": 1.4047833431223938e-05,
"loss": 1.992,
"step": 19
},
{
"epoch": 0.023837902264600714,
"grad_norm": 0.3478672288559764,
"learning_rate": 1.3453650544213078e-05,
"loss": 2.0017,
"step": 20
},
{
"epoch": 0.025029797377830752,
"grad_norm": 0.3124495293347313,
"learning_rate": 1.2845275866310325e-05,
"loss": 1.9654,
"step": 21
},
{
"epoch": 0.026221692491060787,
"grad_norm": 0.30827848535728997,
"learning_rate": 1.2225209339563144e-05,
"loss": 1.9742,
"step": 22
},
{
"epoch": 0.027413587604290822,
"grad_norm": 0.3531950759587543,
"learning_rate": 1.1595998950333794e-05,
"loss": 1.9899,
"step": 23
},
{
"epoch": 0.028605482717520857,
"grad_norm": 0.3222872371524797,
"learning_rate": 1.0960230259076819e-05,
"loss": 1.982,
"step": 24
},
{
"epoch": 0.029797377830750895,
"grad_norm": 0.28066157317874146,
"learning_rate": 1.0320515775716556e-05,
"loss": 1.989,
"step": 25
},
{
"epoch": 0.03098927294398093,
"grad_norm": 0.28664391299284936,
"learning_rate": 9.67948422428345e-06,
"loss": 1.9926,
"step": 26
},
{
"epoch": 0.03218116805721097,
"grad_norm": 0.32690441539002585,
"learning_rate": 9.039769740923183e-06,
"loss": 1.9977,
"step": 27
},
{
"epoch": 0.033373063170441,
"grad_norm": 0.26400158066485074,
"learning_rate": 8.404001049666211e-06,
"loss": 2.0056,
"step": 28
},
{
"epoch": 0.03456495828367104,
"grad_norm": 0.23533776552109373,
"learning_rate": 7.774790660436857e-06,
"loss": 1.9724,
"step": 29
},
{
"epoch": 0.03575685339690107,
"grad_norm": 0.22178984088070275,
"learning_rate": 7.154724133689677e-06,
"loss": 1.9487,
"step": 30
},
{
"epoch": 0.03694874851013111,
"grad_norm": 0.23840728595761607,
"learning_rate": 6.546349455786926e-06,
"loss": 1.9871,
"step": 31
},
{
"epoch": 0.03814064362336114,
"grad_norm": 0.2270923288108359,
"learning_rate": 5.952166568776062e-06,
"loss": 2.0023,
"step": 32
},
{
"epoch": 0.03933253873659118,
"grad_norm": 0.21128333879501035,
"learning_rate": 5.37461709759165e-06,
"loss": 1.9781,
"step": 33
},
{
"epoch": 0.04052443384982122,
"grad_norm": 0.20025698249162766,
"learning_rate": 4.81607431689475e-06,
"loss": 2.0167,
"step": 34
},
{
"epoch": 0.041716328963051254,
"grad_norm": 0.19545125950438635,
"learning_rate": 4.278833398778306e-06,
"loss": 2.0014,
"step": 35
},
{
"epoch": 0.04290822407628129,
"grad_norm": 0.21003372321244757,
"learning_rate": 3.7651019814126656e-06,
"loss": 2.0307,
"step": 36
},
{
"epoch": 0.04410011918951132,
"grad_norm": 0.20875875072814898,
"learning_rate": 3.2769910973868314e-06,
"loss": 1.9922,
"step": 37
},
{
"epoch": 0.04529201430274136,
"grad_norm": 0.20086863185331044,
"learning_rate": 2.8165064990227255e-06,
"loss": 2.009,
"step": 38
},
{
"epoch": 0.04648390941597139,
"grad_norm": 0.19637651354676247,
"learning_rate": 2.3855404163086558e-06,
"loss": 2.0069,
"step": 39
},
{
"epoch": 0.04767580452920143,
"grad_norm": 0.19298356773047148,
"learning_rate": 1.9858637813204352e-06,
"loss": 1.9649,
"step": 40
},
{
"epoch": 0.04886769964243146,
"grad_norm": 0.18616123249372152,
"learning_rate": 1.6191189510815942e-06,
"loss": 1.988,
"step": 41
},
{
"epoch": 0.050059594755661505,
"grad_norm": 0.17608440957049246,
"learning_rate": 1.286812958766106e-06,
"loss": 1.9493,
"step": 42
},
{
"epoch": 0.05125148986889154,
"grad_norm": 0.17839806165567934,
"learning_rate": 9.903113209758098e-07,
"loss": 1.9827,
"step": 43
},
{
"epoch": 0.052443384982121574,
"grad_norm": 0.171992208968052,
"learning_rate": 7.308324265397837e-07,
"loss": 2.0005,
"step": 44
},
{
"epoch": 0.05363528009535161,
"grad_norm": 0.20157026593925115,
"learning_rate": 5.094425298933136e-07,
"loss": 1.9962,
"step": 45
},
{
"epoch": 0.054827175208581644,
"grad_norm": 0.17529676457850735,
"learning_rate": 3.2705136960970554e-07,
"loss": 2.0125,
"step": 46
},
{
"epoch": 0.05601907032181168,
"grad_norm": 0.1769836489001342,
"learning_rate": 1.844084300893456e-07,
"loss": 1.9917,
"step": 47
},
{
"epoch": 0.057210965435041714,
"grad_norm": 0.17274305549148233,
"learning_rate": 8.209986176753947e-08,
"loss": 1.9941,
"step": 48
},
{
"epoch": 0.058402860548271755,
"grad_norm": 0.1777317862287471,
"learning_rate": 2.054607249663665e-08,
"loss": 1.9912,
"step": 49
},
{
"epoch": 0.05959475566150179,
"grad_norm": 0.17680142652753666,
"learning_rate": 0.0,
"loss": 2.0301,
"step": 50
},
{
"epoch": 0.05959475566150179,
"step": 50,
"total_flos": 106890730143744.0,
"train_loss": 1.9979763078689574,
"train_runtime": 1596.1705,
"train_samples_per_second": 58.139,
"train_steps_per_second": 0.031
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 106890730143744.0,
"train_batch_size": 58,
"trial_name": null,
"trial_params": null
}