{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.20432900432900433,
"eval_steps": 59,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003463203463203463,
"grad_norm": 0.26726240342863167,
"learning_rate": 5e-06,
"loss": 1.8182,
"step": 1
},
{
"epoch": 0.003463203463203463,
"eval_loss": 2.1284499168395996,
"eval_runtime": 293.5739,
"eval_samples_per_second": 0.341,
"eval_steps_per_second": 0.17,
"step": 1
},
{
"epoch": 0.006926406926406926,
"grad_norm": 0.20034662456059837,
"learning_rate": 1e-05,
"loss": 1.9101,
"step": 2
},
{
"epoch": 0.01038961038961039,
"grad_norm": 0.23729183422165226,
"learning_rate": 1.5e-05,
"loss": 1.9238,
"step": 3
},
{
"epoch": 0.013852813852813853,
"grad_norm": 0.1802988503389903,
"learning_rate": 2e-05,
"loss": 1.8901,
"step": 4
},
{
"epoch": 0.017316017316017316,
"grad_norm": 0.18865054609738205,
"learning_rate": 2.5e-05,
"loss": 1.9419,
"step": 5
},
{
"epoch": 0.02077922077922078,
"grad_norm": 0.2153919517946603,
"learning_rate": 3e-05,
"loss": 1.9561,
"step": 6
},
{
"epoch": 0.024242424242424242,
"grad_norm": 0.23646989547590208,
"learning_rate": 3.5e-05,
"loss": 1.9231,
"step": 7
},
{
"epoch": 0.027705627705627706,
"grad_norm": 0.1695483068973071,
"learning_rate": 4e-05,
"loss": 1.7199,
"step": 8
},
{
"epoch": 0.03116883116883117,
"grad_norm": 0.17169305240230742,
"learning_rate": 4.5e-05,
"loss": 1.9328,
"step": 9
},
{
"epoch": 0.03463203463203463,
"grad_norm": 0.1769763038127782,
"learning_rate": 5e-05,
"loss": 1.794,
"step": 10
},
{
"epoch": 0.0380952380952381,
"grad_norm": 0.2739291337489883,
"learning_rate": 5.500000000000001e-05,
"loss": 1.8976,
"step": 11
},
{
"epoch": 0.04155844155844156,
"grad_norm": 0.1687629241124428,
"learning_rate": 6e-05,
"loss": 1.8382,
"step": 12
},
{
"epoch": 0.045021645021645025,
"grad_norm": 0.170449291833281,
"learning_rate": 6.500000000000001e-05,
"loss": 1.8339,
"step": 13
},
{
"epoch": 0.048484848484848485,
"grad_norm": 0.13830013122227536,
"learning_rate": 7e-05,
"loss": 1.7958,
"step": 14
},
{
"epoch": 0.05194805194805195,
"grad_norm": 0.3120525656602162,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8834,
"step": 15
},
{
"epoch": 0.05541125541125541,
"grad_norm": 0.1740554404965808,
"learning_rate": 8e-05,
"loss": 1.643,
"step": 16
},
{
"epoch": 0.05887445887445888,
"grad_norm": 0.13272024244803257,
"learning_rate": 8.5e-05,
"loss": 1.7635,
"step": 17
},
{
"epoch": 0.06233766233766234,
"grad_norm": 0.13865354195619714,
"learning_rate": 9e-05,
"loss": 1.8596,
"step": 18
},
{
"epoch": 0.0658008658008658,
"grad_norm": 0.17558880062270332,
"learning_rate": 9.5e-05,
"loss": 2.0007,
"step": 19
},
{
"epoch": 0.06926406926406926,
"grad_norm": 0.18945613013501378,
"learning_rate": 0.0001,
"loss": 1.8773,
"step": 20
},
{
"epoch": 0.07272727272727272,
"grad_norm": 0.11111588495801177,
"learning_rate": 9.999697629917739e-05,
"loss": 1.8568,
"step": 21
},
{
"epoch": 0.0761904761904762,
"grad_norm": 0.2167792483298019,
"learning_rate": 9.998790560305473e-05,
"loss": 1.7966,
"step": 22
},
{
"epoch": 0.07965367965367966,
"grad_norm": 0.17058063410322674,
"learning_rate": 9.997278913061298e-05,
"loss": 1.8296,
"step": 23
},
{
"epoch": 0.08311688311688312,
"grad_norm": 0.22535134477990126,
"learning_rate": 9.995162891330504e-05,
"loss": 1.8398,
"step": 24
},
{
"epoch": 0.08658008658008658,
"grad_norm": 0.13284807817034902,
"learning_rate": 9.992442779478275e-05,
"loss": 1.7536,
"step": 25
},
{
"epoch": 0.09004329004329005,
"grad_norm": 0.1525380710864997,
"learning_rate": 9.989118943051471e-05,
"loss": 1.8267,
"step": 26
},
{
"epoch": 0.09350649350649351,
"grad_norm": 0.38389909659403076,
"learning_rate": 9.985191828729519e-05,
"loss": 1.7526,
"step": 27
},
{
"epoch": 0.09696969696969697,
"grad_norm": 0.1311981474834846,
"learning_rate": 9.98066196426436e-05,
"loss": 1.8789,
"step": 28
},
{
"epoch": 0.10043290043290043,
"grad_norm": 0.14321635310477376,
"learning_rate": 9.97552995840955e-05,
"loss": 1.8666,
"step": 29
},
{
"epoch": 0.1038961038961039,
"grad_norm": 0.131774192226824,
"learning_rate": 9.969796500838434e-05,
"loss": 1.8831,
"step": 30
},
{
"epoch": 0.10735930735930736,
"grad_norm": 0.13710505067770457,
"learning_rate": 9.963462362051473e-05,
"loss": 1.6771,
"step": 31
},
{
"epoch": 0.11082251082251082,
"grad_norm": 0.14077801488589667,
"learning_rate": 9.956528393272697e-05,
"loss": 1.8743,
"step": 32
},
{
"epoch": 0.11428571428571428,
"grad_norm": 0.1606260003339504,
"learning_rate": 9.94899552633531e-05,
"loss": 1.7463,
"step": 33
},
{
"epoch": 0.11774891774891776,
"grad_norm": 0.20844538767698467,
"learning_rate": 9.940864773556466e-05,
"loss": 1.8095,
"step": 34
},
{
"epoch": 0.12121212121212122,
"grad_norm": 0.13051717096877563,
"learning_rate": 9.932137227601224e-05,
"loss": 1.743,
"step": 35
},
{
"epoch": 0.12467532467532468,
"grad_norm": 0.1273670930785435,
"learning_rate": 9.922814061335716e-05,
"loss": 1.8164,
"step": 36
},
{
"epoch": 0.12813852813852813,
"grad_norm": 0.14632843388917796,
"learning_rate": 9.912896527669518e-05,
"loss": 1.6922,
"step": 37
},
{
"epoch": 0.1316017316017316,
"grad_norm": 0.1877238002560958,
"learning_rate": 9.902385959387282e-05,
"loss": 1.8563,
"step": 38
},
{
"epoch": 0.13506493506493505,
"grad_norm": 0.12224962058562377,
"learning_rate": 9.891283768969633e-05,
"loss": 1.7657,
"step": 39
},
{
"epoch": 0.13852813852813853,
"grad_norm": 0.1552638716363645,
"learning_rate": 9.879591448403333e-05,
"loss": 1.9331,
"step": 40
},
{
"epoch": 0.141991341991342,
"grad_norm": 0.3572833915022296,
"learning_rate": 9.867310568980802e-05,
"loss": 1.6578,
"step": 41
},
{
"epoch": 0.14545454545454545,
"grad_norm": 0.9532440952793683,
"learning_rate": 9.854442781088935e-05,
"loss": 1.8014,
"step": 42
},
{
"epoch": 0.14891774891774892,
"grad_norm": 0.23015183389227475,
"learning_rate": 9.840989813987326e-05,
"loss": 1.6852,
"step": 43
},
{
"epoch": 0.1523809523809524,
"grad_norm": 0.13909635416408656,
"learning_rate": 9.826953475575873e-05,
"loss": 1.7686,
"step": 44
},
{
"epoch": 0.15584415584415584,
"grad_norm": 0.14004251970994497,
"learning_rate": 9.812335652151818e-05,
"loss": 1.8545,
"step": 45
},
{
"epoch": 0.15930735930735931,
"grad_norm": 0.12804047264438845,
"learning_rate": 9.797138308156262e-05,
"loss": 1.8549,
"step": 46
},
{
"epoch": 0.16277056277056276,
"grad_norm": 0.15888098268224185,
"learning_rate": 9.781363485910162e-05,
"loss": 1.8095,
"step": 47
},
{
"epoch": 0.16623376623376623,
"grad_norm": 0.1753776918443242,
"learning_rate": 9.765013305339872e-05,
"loss": 1.7596,
"step": 48
},
{
"epoch": 0.1696969696969697,
"grad_norm": 0.1542382423932175,
"learning_rate": 9.748089963692256e-05,
"loss": 1.9146,
"step": 49
},
{
"epoch": 0.17316017316017315,
"grad_norm": 0.13847988655949817,
"learning_rate": 9.730595735239407e-05,
"loss": 1.7984,
"step": 50
},
{
"epoch": 0.17662337662337663,
"grad_norm": 0.4071014591163337,
"learning_rate": 9.712532970973013e-05,
"loss": 1.7578,
"step": 51
},
{
"epoch": 0.1800865800865801,
"grad_norm": 0.17428941847352164,
"learning_rate": 9.693904098288415e-05,
"loss": 1.8245,
"step": 52
},
{
"epoch": 0.18354978354978355,
"grad_norm": 0.12001384773806148,
"learning_rate": 9.674711620658393e-05,
"loss": 1.8891,
"step": 53
},
{
"epoch": 0.18701298701298702,
"grad_norm": 0.14927831515795748,
"learning_rate": 9.654958117296748e-05,
"loss": 1.7755,
"step": 54
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.1328911465672095,
"learning_rate": 9.634646242811673e-05,
"loss": 1.8446,
"step": 55
},
{
"epoch": 0.19393939393939394,
"grad_norm": 0.22985304080720362,
"learning_rate": 9.613778726849014e-05,
"loss": 1.8517,
"step": 56
},
{
"epoch": 0.1974025974025974,
"grad_norm": 0.20379099943758505,
"learning_rate": 9.592358373725448e-05,
"loss": 1.6363,
"step": 57
},
{
"epoch": 0.20086580086580086,
"grad_norm": 0.13109221546818925,
"learning_rate": 9.570388062051613e-05,
"loss": 1.7453,
"step": 58
},
{
"epoch": 0.20432900432900433,
"grad_norm": 0.2032056177408556,
"learning_rate": 9.547870744345262e-05,
"loss": 1.8279,
"step": 59
},
{
"epoch": 0.20432900432900433,
"eval_loss": 1.999111294746399,
"eval_runtime": 293.5683,
"eval_samples_per_second": 0.341,
"eval_steps_per_second": 0.17,
"step": 59
}
],
"logging_steps": 1,
"max_steps": 291,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 59,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7038165458092032.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}