{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.05959475566150179,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011918951132300357,
"grad_norm": 3.1771470700380333,
"learning_rate": 2e-05,
"loss": 2.1431,
"step": 1
},
{
"epoch": 0.0023837902264600714,
"grad_norm": 3.1161312627163005,
"learning_rate": 1.9979453927503366e-05,
"loss": 2.138,
"step": 2
},
{
"epoch": 0.003575685339690107,
"grad_norm": 1.264785017261071,
"learning_rate": 1.991790013823246e-05,
"loss": 2.0438,
"step": 3
},
{
"epoch": 0.004767580452920143,
"grad_norm": 3.409228751017024,
"learning_rate": 1.9815591569910654e-05,
"loss": 2.0838,
"step": 4
},
{
"epoch": 0.0059594755661501785,
"grad_norm": 2.7305256748776907,
"learning_rate": 1.9672948630390296e-05,
"loss": 2.0696,
"step": 5
},
{
"epoch": 0.007151370679380214,
"grad_norm": 1.8763103868114503,
"learning_rate": 1.949055747010669e-05,
"loss": 2.0977,
"step": 6
},
{
"epoch": 0.00834326579261025,
"grad_norm": 1.5468453869545318,
"learning_rate": 1.926916757346022e-05,
"loss": 2.0518,
"step": 7
},
{
"epoch": 0.009535160905840286,
"grad_norm": 1.0642293866767805,
"learning_rate": 1.900968867902419e-05,
"loss": 2.0096,
"step": 8
},
{
"epoch": 0.010727056019070322,
"grad_norm": 0.780500699075983,
"learning_rate": 1.8713187041233896e-05,
"loss": 2.0271,
"step": 9
},
{
"epoch": 0.011918951132300357,
"grad_norm": 0.764005329228154,
"learning_rate": 1.8380881048918406e-05,
"loss": 2.0058,
"step": 10
},
{
"epoch": 0.013110846245530394,
"grad_norm": 0.6241582333904159,
"learning_rate": 1.8014136218679566e-05,
"loss": 2.01,
"step": 11
},
{
"epoch": 0.014302741358760428,
"grad_norm": 0.5902053742685855,
"learning_rate": 1.7614459583691346e-05,
"loss": 2.0019,
"step": 12
},
{
"epoch": 0.015494636471990465,
"grad_norm": 0.6425310694189268,
"learning_rate": 1.7183493500977277e-05,
"loss": 1.9932,
"step": 13
},
{
"epoch": 0.0166865315852205,
"grad_norm": 0.5309446367223852,
"learning_rate": 1.672300890261317e-05,
"loss": 1.9988,
"step": 14
},
{
"epoch": 0.017878426698450536,
"grad_norm": 0.5138876585539982,
"learning_rate": 1.6234898018587336e-05,
"loss": 2.0101,
"step": 15
},
{
"epoch": 0.01907032181168057,
"grad_norm": 0.5392570440013659,
"learning_rate": 1.5721166601221697e-05,
"loss": 1.9911,
"step": 16
},
{
"epoch": 0.02026221692491061,
"grad_norm": 0.5029299842352577,
"learning_rate": 1.5183925683105254e-05,
"loss": 2.0116,
"step": 17
},
{
"epoch": 0.021454112038140644,
"grad_norm": 0.43594035780610685,
"learning_rate": 1.4625382902408356e-05,
"loss": 2.0073,
"step": 18
},
{
"epoch": 0.02264600715137068,
"grad_norm": 0.41696447792308583,
"learning_rate": 1.4047833431223938e-05,
"loss": 2.0208,
"step": 19
},
{
"epoch": 0.023837902264600714,
"grad_norm": 0.3892062305227489,
"learning_rate": 1.3453650544213078e-05,
"loss": 2.031,
"step": 20
},
{
"epoch": 0.025029797377830752,
"grad_norm": 0.3632073887411339,
"learning_rate": 1.2845275866310325e-05,
"loss": 1.9957,
"step": 21
},
{
"epoch": 0.026221692491060787,
"grad_norm": 0.33796099940642593,
"learning_rate": 1.2225209339563144e-05,
"loss": 2.0049,
"step": 22
},
{
"epoch": 0.027413587604290822,
"grad_norm": 0.32832752521945185,
"learning_rate": 1.1595998950333794e-05,
"loss": 2.02,
"step": 23
},
{
"epoch": 0.028605482717520857,
"grad_norm": 0.31678859599430514,
"learning_rate": 1.0960230259076819e-05,
"loss": 2.0122,
"step": 24
},
{
"epoch": 0.029797377830750895,
"grad_norm": 0.3054795643275271,
"learning_rate": 1.0320515775716556e-05,
"loss": 2.0194,
"step": 25
},
{
"epoch": 0.03098927294398093,
"grad_norm": 0.30476969314662167,
"learning_rate": 9.67948422428345e-06,
"loss": 2.0244,
"step": 26
},
{
"epoch": 0.03218116805721097,
"grad_norm": 0.3397762952406562,
"learning_rate": 9.039769740923183e-06,
"loss": 2.0301,
"step": 27
},
{
"epoch": 0.033373063170441,
"grad_norm": 0.28294935774392,
"learning_rate": 8.404001049666211e-06,
"loss": 2.039,
"step": 28
},
{
"epoch": 0.03456495828367104,
"grad_norm": 0.2677495861482238,
"learning_rate": 7.774790660436857e-06,
"loss": 2.0065,
"step": 29
},
{
"epoch": 0.03575685339690107,
"grad_norm": 0.2523404664659208,
"learning_rate": 7.154724133689677e-06,
"loss": 1.9824,
"step": 30
},
{
"epoch": 0.03694874851013111,
"grad_norm": 0.2677420506485163,
"learning_rate": 6.546349455786926e-06,
"loss": 2.0211,
"step": 31
},
{
"epoch": 0.03814064362336114,
"grad_norm": 0.25403279083921193,
"learning_rate": 5.952166568776062e-06,
"loss": 2.0364,
"step": 32
},
{
"epoch": 0.03933253873659118,
"grad_norm": 0.23827255140088083,
"learning_rate": 5.37461709759165e-06,
"loss": 2.0124,
"step": 33
},
{
"epoch": 0.04052443384982122,
"grad_norm": 0.21241476817661348,
"learning_rate": 4.81607431689475e-06,
"loss": 2.0512,
"step": 34
},
{
"epoch": 0.041716328963051254,
"grad_norm": 0.209552693603396,
"learning_rate": 4.278833398778306e-06,
"loss": 2.0375,
"step": 35
},
{
"epoch": 0.04290822407628129,
"grad_norm": 0.21360560735074388,
"learning_rate": 3.7651019814126656e-06,
"loss": 2.0665,
"step": 36
},
{
"epoch": 0.04410011918951132,
"grad_norm": 0.2182223081631352,
"learning_rate": 3.2769910973868314e-06,
"loss": 2.0289,
"step": 37
},
{
"epoch": 0.04529201430274136,
"grad_norm": 0.21248420749052188,
"learning_rate": 2.8165064990227255e-06,
"loss": 2.0451,
"step": 38
},
{
"epoch": 0.04648390941597139,
"grad_norm": 0.20688489883738986,
"learning_rate": 2.3855404163086558e-06,
"loss": 2.0445,
"step": 39
},
{
"epoch": 0.04767580452920143,
"grad_norm": 0.20278373082128726,
"learning_rate": 1.9858637813204352e-06,
"loss": 2.0017,
"step": 40
},
{
"epoch": 0.04886769964243146,
"grad_norm": 0.20097941188780546,
"learning_rate": 1.6191189510815942e-06,
"loss": 2.0239,
"step": 41
},
{
"epoch": 0.050059594755661505,
"grad_norm": 0.19492229623743026,
"learning_rate": 1.286812958766106e-06,
"loss": 1.9878,
"step": 42
},
{
"epoch": 0.05125148986889154,
"grad_norm": 0.19191563010483195,
"learning_rate": 9.903113209758098e-07,
"loss": 2.0205,
"step": 43
},
{
"epoch": 0.052443384982121574,
"grad_norm": 0.18895682704532601,
"learning_rate": 7.308324265397837e-07,
"loss": 2.0377,
"step": 44
},
{
"epoch": 0.05363528009535161,
"grad_norm": 0.2270168441556834,
"learning_rate": 5.094425298933136e-07,
"loss": 2.034,
"step": 45
},
{
"epoch": 0.054827175208581644,
"grad_norm": 0.1944028112406021,
"learning_rate": 3.2705136960970554e-07,
"loss": 2.0496,
"step": 46
},
{
"epoch": 0.05601907032181168,
"grad_norm": 0.18999114531888883,
"learning_rate": 1.844084300893456e-07,
"loss": 2.0299,
"step": 47
},
{
"epoch": 0.057210965435041714,
"grad_norm": 0.19083668329491307,
"learning_rate": 8.209986176753947e-08,
"loss": 2.0317,
"step": 48
},
{
"epoch": 0.058402860548271755,
"grad_norm": 0.19161290816553783,
"learning_rate": 2.054607249663665e-08,
"loss": 2.0296,
"step": 49
},
{
"epoch": 0.05959475566150179,
"grad_norm": 0.19193914325325095,
"learning_rate": 0.0,
"loss": 2.068,
"step": 50
},
{
"epoch": 0.05959475566150179,
"step": 50,
"total_flos": 106890730143744.0,
"train_loss": 2.030773923397064,
"train_runtime": 1597.0722,
"train_samples_per_second": 58.106,
"train_steps_per_second": 0.031
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 106890730143744.0,
"train_batch_size": 58,
"trial_name": null,
"trial_params": null
}