{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05959475566150179,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0011918951132300357,
      "grad_norm": 1.8323841453219294,
      "learning_rate": 2e-05,
      "loss": 2.1039,
      "step": 1
    },
    {
      "epoch": 0.0023837902264600714,
      "grad_norm": 1.837920719204094,
      "learning_rate": 1.9979453927503366e-05,
      "loss": 2.1015,
      "step": 2
    },
    {
      "epoch": 0.003575685339690107,
      "grad_norm": 0.9173452411852326,
      "learning_rate": 1.991790013823246e-05,
      "loss": 2.0311,
      "step": 3
    },
    {
      "epoch": 0.004767580452920143,
      "grad_norm": 1.265146161082013,
      "learning_rate": 1.9815591569910654e-05,
      "loss": 2.0187,
      "step": 4
    },
    {
      "epoch": 0.0059594755661501785,
      "grad_norm": 1.0055189392399428,
      "learning_rate": 1.9672948630390296e-05,
      "loss": 2.0142,
      "step": 5
    },
    {
      "epoch": 0.007151370679380214,
      "grad_norm": 1.6116133535057522,
      "learning_rate": 1.949055747010669e-05,
      "loss": 2.047,
      "step": 6
    },
    {
      "epoch": 0.00834326579261025,
      "grad_norm": 1.0164569935770877,
      "learning_rate": 1.926916757346022e-05,
      "loss": 2.0114,
      "step": 7
    },
    {
      "epoch": 0.009535160905840286,
      "grad_norm": 0.8088386058968114,
      "learning_rate": 1.900968867902419e-05,
      "loss": 1.979,
      "step": 8
    },
    {
      "epoch": 0.010727056019070322,
      "grad_norm": 0.7238192809262382,
      "learning_rate": 1.8713187041233896e-05,
      "loss": 1.9969,
      "step": 9
    },
    {
      "epoch": 0.011918951132300357,
      "grad_norm": 0.5757195660436086,
      "learning_rate": 1.8380881048918406e-05,
      "loss": 1.9719,
      "step": 10
    },
    {
      "epoch": 0.013110846245530394,
      "grad_norm": 0.6136131835485481,
      "learning_rate": 1.8014136218679566e-05,
      "loss": 1.9736,
      "step": 11
    },
    {
      "epoch": 0.014302741358760428,
      "grad_norm": 0.6462531692000065,
      "learning_rate": 1.7614459583691346e-05,
      "loss": 1.9625,
      "step": 12
    },
    {
      "epoch": 0.015494636471990465,
      "grad_norm": 0.5160196738824037,
      "learning_rate": 1.7183493500977277e-05,
      "loss": 1.9524,
      "step": 13
    },
    {
      "epoch": 0.0166865315852205,
      "grad_norm": 0.48508132154258404,
      "learning_rate": 1.672300890261317e-05,
      "loss": 1.9584,
      "step": 14
    },
    {
      "epoch": 0.017878426698450536,
      "grad_norm": 0.5181005310771087,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 1.967,
      "step": 15
    },
    {
      "epoch": 0.01907032181168057,
      "grad_norm": 0.4150834616993424,
      "learning_rate": 1.5721166601221697e-05,
      "loss": 1.9456,
      "step": 16
    },
    {
      "epoch": 0.02026221692491061,
      "grad_norm": 0.3379165251881648,
      "learning_rate": 1.5183925683105254e-05,
      "loss": 1.9637,
      "step": 17
    },
    {
      "epoch": 0.021454112038140644,
      "grad_norm": 0.3484808167274383,
      "learning_rate": 1.4625382902408356e-05,
      "loss": 1.9582,
      "step": 18
    },
    {
      "epoch": 0.02264600715137068,
      "grad_norm": 0.35126742165373703,
      "learning_rate": 1.4047833431223938e-05,
      "loss": 1.9716,
      "step": 19
    },
    {
      "epoch": 0.023837902264600714,
      "grad_norm": 0.2995298854480071,
      "learning_rate": 1.3453650544213078e-05,
      "loss": 1.9804,
      "step": 20
    },
    {
      "epoch": 0.025029797377830752,
      "grad_norm": 0.2812553357334388,
      "learning_rate": 1.2845275866310325e-05,
      "loss": 1.943,
      "step": 21
    },
    {
      "epoch": 0.026221692491060787,
      "grad_norm": 0.29784459167074245,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 1.9517,
      "step": 22
    },
    {
      "epoch": 0.027413587604290822,
      "grad_norm": 0.28759384963571305,
      "learning_rate": 1.1595998950333794e-05,
      "loss": 1.9676,
      "step": 23
    },
    {
      "epoch": 0.028605482717520857,
      "grad_norm": 0.26154732718281476,
      "learning_rate": 1.0960230259076819e-05,
      "loss": 1.9585,
      "step": 24
    },
    {
      "epoch": 0.029797377830750895,
      "grad_norm": 0.2417322199141499,
      "learning_rate": 1.0320515775716556e-05,
      "loss": 1.9658,
      "step": 25
    },
    {
      "epoch": 0.03098927294398093,
      "grad_norm": 0.2647008811703257,
      "learning_rate": 9.67948422428345e-06,
      "loss": 1.9677,
      "step": 26
    },
    {
      "epoch": 0.03218116805721097,
      "grad_norm": 0.28156330533090274,
      "learning_rate": 9.039769740923183e-06,
      "loss": 1.971,
      "step": 27
    },
    {
      "epoch": 0.033373063170441,
      "grad_norm": 0.23451944041424483,
      "learning_rate": 8.404001049666211e-06,
      "loss": 1.9777,
      "step": 28
    },
    {
      "epoch": 0.03456495828367104,
      "grad_norm": 0.20425845712936946,
      "learning_rate": 7.774790660436857e-06,
      "loss": 1.9455,
      "step": 29
    },
    {
      "epoch": 0.03575685339690107,
      "grad_norm": 0.20003632743257047,
      "learning_rate": 7.154724133689677e-06,
      "loss": 1.9219,
      "step": 30
    },
    {
      "epoch": 0.03694874851013111,
      "grad_norm": 0.22248700808509062,
      "learning_rate": 6.546349455786926e-06,
      "loss": 1.9595,
      "step": 31
    },
    {
      "epoch": 0.03814064362336114,
      "grad_norm": 0.22141566358189815,
      "learning_rate": 5.952166568776062e-06,
      "loss": 1.9736,
      "step": 32
    },
    {
      "epoch": 0.03933253873659118,
      "grad_norm": 0.20350762417692123,
      "learning_rate": 5.37461709759165e-06,
      "loss": 1.9491,
      "step": 33
    },
    {
      "epoch": 0.04052443384982122,
      "grad_norm": 0.18362905257668707,
      "learning_rate": 4.81607431689475e-06,
      "loss": 1.9877,
      "step": 34
    },
    {
      "epoch": 0.041716328963051254,
      "grad_norm": 0.18209143534810343,
      "learning_rate": 4.278833398778306e-06,
      "loss": 1.9711,
      "step": 35
    },
    {
      "epoch": 0.04290822407628129,
      "grad_norm": 0.1870043994840097,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 1.9997,
      "step": 36
    },
    {
      "epoch": 0.04410011918951132,
      "grad_norm": 0.18563803773188292,
      "learning_rate": 3.2769910973868314e-06,
      "loss": 1.9611,
      "step": 37
    },
    {
      "epoch": 0.04529201430274136,
      "grad_norm": 0.1803488628139512,
      "learning_rate": 2.8165064990227255e-06,
      "loss": 1.9779,
      "step": 38
    },
    {
      "epoch": 0.04648390941597139,
      "grad_norm": 0.18280901111654344,
      "learning_rate": 2.3855404163086558e-06,
      "loss": 1.9753,
      "step": 39
    },
    {
      "epoch": 0.04767580452920143,
      "grad_norm": 0.18003744910780412,
      "learning_rate": 1.9858637813204352e-06,
      "loss": 1.9327,
      "step": 40
    },
    {
      "epoch": 0.04886769964243146,
      "grad_norm": 0.1751954707202772,
      "learning_rate": 1.6191189510815942e-06,
      "loss": 1.9578,
      "step": 41
    },
    {
      "epoch": 0.050059594755661505,
      "grad_norm": 0.17152593814056669,
      "learning_rate": 1.286812958766106e-06,
      "loss": 1.9176,
      "step": 42
    },
    {
      "epoch": 0.05125148986889154,
      "grad_norm": 0.16959349088820902,
      "learning_rate": 9.903113209758098e-07,
      "loss": 1.9505,
      "step": 43
    },
    {
      "epoch": 0.052443384982121574,
      "grad_norm": 0.16660058312767648,
      "learning_rate": 7.308324265397837e-07,
      "loss": 1.9683,
      "step": 44
    },
    {
      "epoch": 0.05363528009535161,
      "grad_norm": 0.17674746764132468,
      "learning_rate": 5.094425298933136e-07,
      "loss": 1.9643,
      "step": 45
    },
    {
      "epoch": 0.054827175208581644,
      "grad_norm": 0.16795370746145422,
      "learning_rate": 3.2705136960970554e-07,
      "loss": 1.9812,
      "step": 46
    },
    {
      "epoch": 0.05601907032181168,
      "grad_norm": 0.17056366756389463,
      "learning_rate": 1.844084300893456e-07,
      "loss": 1.959,
      "step": 47
    },
    {
      "epoch": 0.057210965435041714,
      "grad_norm": 0.16170892183194993,
      "learning_rate": 8.209986176753947e-08,
      "loss": 1.9624,
      "step": 48
    },
    {
      "epoch": 0.058402860548271755,
      "grad_norm": 0.17173937225335953,
      "learning_rate": 2.054607249663665e-08,
      "loss": 1.9578,
      "step": 49
    },
    {
      "epoch": 0.05959475566150179,
      "grad_norm": 0.17021831986392572,
      "learning_rate": 0.0,
      "loss": 1.9973,
      "step": 50
    },
    {
      "epoch": 0.05959475566150179,
      "step": 50,
      "total_flos": 106890730143744.0,
      "train_loss": 1.9756734085083008,
      "train_runtime": 1598.4061,
      "train_samples_per_second": 58.058,
      "train_steps_per_second": 0.031
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 106890730143744.0,
  "train_batch_size": 58,
  "trial_name": null,
  "trial_params": null
}