Training in progress, step 200, checkpoint
commit 0b55a94 (verified)
{
"best_metric": 3.651360034942627,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 2.962962962962963,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014814814814814815,
"grad_norm": 68.14942932128906,
"learning_rate": 1e-05,
"loss": 15.6754,
"step": 1
},
{
"epoch": 0.014814814814814815,
"eval_loss": 4.450826644897461,
"eval_runtime": 1.3431,
"eval_samples_per_second": 84.876,
"eval_steps_per_second": 21.591,
"step": 1
},
{
"epoch": 0.02962962962962963,
"grad_norm": 61.27214813232422,
"learning_rate": 2e-05,
"loss": 16.7019,
"step": 2
},
{
"epoch": 0.044444444444444446,
"grad_norm": 68.24706268310547,
"learning_rate": 3e-05,
"loss": 16.8992,
"step": 3
},
{
"epoch": 0.05925925925925926,
"grad_norm": 59.184932708740234,
"learning_rate": 4e-05,
"loss": 16.9906,
"step": 4
},
{
"epoch": 0.07407407407407407,
"grad_norm": 75.5377197265625,
"learning_rate": 5e-05,
"loss": 17.1453,
"step": 5
},
{
"epoch": 0.08888888888888889,
"grad_norm": 64.0133285522461,
"learning_rate": 6e-05,
"loss": 17.0698,
"step": 6
},
{
"epoch": 0.1037037037037037,
"grad_norm": 67.4994888305664,
"learning_rate": 7e-05,
"loss": 16.9055,
"step": 7
},
{
"epoch": 0.11851851851851852,
"grad_norm": 65.94906616210938,
"learning_rate": 8e-05,
"loss": 16.7561,
"step": 8
},
{
"epoch": 0.13333333333333333,
"grad_norm": 62.68586730957031,
"learning_rate": 9e-05,
"loss": 17.2859,
"step": 9
},
{
"epoch": 0.14814814814814814,
"grad_norm": 65.58512115478516,
"learning_rate": 0.0001,
"loss": 16.832,
"step": 10
},
{
"epoch": 0.16296296296296298,
"grad_norm": 61.67501449584961,
"learning_rate": 9.999316524962345e-05,
"loss": 17.3085,
"step": 11
},
{
"epoch": 0.17777777777777778,
"grad_norm": 57.07966995239258,
"learning_rate": 9.997266286704631e-05,
"loss": 17.0353,
"step": 12
},
{
"epoch": 0.1925925925925926,
"grad_norm": 55.02193832397461,
"learning_rate": 9.993849845741524e-05,
"loss": 16.7799,
"step": 13
},
{
"epoch": 0.2074074074074074,
"grad_norm": 47.15218734741211,
"learning_rate": 9.989068136093873e-05,
"loss": 17.5271,
"step": 14
},
{
"epoch": 0.2222222222222222,
"grad_norm": 48.52050018310547,
"learning_rate": 9.98292246503335e-05,
"loss": 17.6621,
"step": 15
},
{
"epoch": 0.23703703703703705,
"grad_norm": 53.22841262817383,
"learning_rate": 9.975414512725057e-05,
"loss": 18.4284,
"step": 16
},
{
"epoch": 0.2518518518518518,
"grad_norm": 39.201499938964844,
"learning_rate": 9.966546331768191e-05,
"loss": 14.8461,
"step": 17
},
{
"epoch": 0.26666666666666666,
"grad_norm": 46.266292572021484,
"learning_rate": 9.956320346634876e-05,
"loss": 15.3912,
"step": 18
},
{
"epoch": 0.2814814814814815,
"grad_norm": 47.28651428222656,
"learning_rate": 9.944739353007344e-05,
"loss": 15.371,
"step": 19
},
{
"epoch": 0.2962962962962963,
"grad_norm": 50.28116226196289,
"learning_rate": 9.931806517013612e-05,
"loss": 15.393,
"step": 20
},
{
"epoch": 0.3111111111111111,
"grad_norm": 52.24303436279297,
"learning_rate": 9.917525374361912e-05,
"loss": 14.56,
"step": 21
},
{
"epoch": 0.32592592592592595,
"grad_norm": 64.50386047363281,
"learning_rate": 9.901899829374047e-05,
"loss": 15.4156,
"step": 22
},
{
"epoch": 0.34074074074074073,
"grad_norm": 64.96419525146484,
"learning_rate": 9.884934153917997e-05,
"loss": 14.7247,
"step": 23
},
{
"epoch": 0.35555555555555557,
"grad_norm": 69.31340026855469,
"learning_rate": 9.86663298624003e-05,
"loss": 15.3642,
"step": 24
},
{
"epoch": 0.37037037037037035,
"grad_norm": 95.78976440429688,
"learning_rate": 9.847001329696653e-05,
"loss": 15.7602,
"step": 25
},
{
"epoch": 0.3851851851851852,
"grad_norm": 99.71963500976562,
"learning_rate": 9.826044551386744e-05,
"loss": 15.5932,
"step": 26
},
{
"epoch": 0.4,
"grad_norm": 107.60958862304688,
"learning_rate": 9.803768380684242e-05,
"loss": 16.1262,
"step": 27
},
{
"epoch": 0.4148148148148148,
"grad_norm": 134.14157104492188,
"learning_rate": 9.780178907671789e-05,
"loss": 16.6154,
"step": 28
},
{
"epoch": 0.42962962962962964,
"grad_norm": 146.53665161132812,
"learning_rate": 9.755282581475769e-05,
"loss": 16.7915,
"step": 29
},
{
"epoch": 0.4444444444444444,
"grad_norm": 152.47879028320312,
"learning_rate": 9.729086208503174e-05,
"loss": 17.1235,
"step": 30
},
{
"epoch": 0.45925925925925926,
"grad_norm": 153.4506378173828,
"learning_rate": 9.701596950580806e-05,
"loss": 17.6354,
"step": 31
},
{
"epoch": 0.4740740740740741,
"grad_norm": 157.79510498046875,
"learning_rate": 9.672822322997305e-05,
"loss": 18.3468,
"step": 32
},
{
"epoch": 0.4888888888888889,
"grad_norm": 152.51951599121094,
"learning_rate": 9.642770192448536e-05,
"loss": 15.2455,
"step": 33
},
{
"epoch": 0.5037037037037037,
"grad_norm": 181.78671264648438,
"learning_rate": 9.611448774886924e-05,
"loss": 15.8809,
"step": 34
},
{
"epoch": 0.5185185185185185,
"grad_norm": 200.03890991210938,
"learning_rate": 9.578866633275288e-05,
"loss": 15.6788,
"step": 35
},
{
"epoch": 0.5333333333333333,
"grad_norm": 219.14768981933594,
"learning_rate": 9.545032675245813e-05,
"loss": 16.2354,
"step": 36
},
{
"epoch": 0.5481481481481482,
"grad_norm": 199.77487182617188,
"learning_rate": 9.509956150664796e-05,
"loss": 15.8475,
"step": 37
},
{
"epoch": 0.562962962962963,
"grad_norm": 190.43923950195312,
"learning_rate": 9.473646649103818e-05,
"loss": 16.2861,
"step": 38
},
{
"epoch": 0.5777777777777777,
"grad_norm": 210.31869506835938,
"learning_rate": 9.43611409721806e-05,
"loss": 16.5507,
"step": 39
},
{
"epoch": 0.5925925925925926,
"grad_norm": 166.53656005859375,
"learning_rate": 9.397368756032445e-05,
"loss": 16.0793,
"step": 40
},
{
"epoch": 0.6074074074074074,
"grad_norm": 164.68179321289062,
"learning_rate": 9.357421218136386e-05,
"loss": 16.0393,
"step": 41
},
{
"epoch": 0.6222222222222222,
"grad_norm": 185.03768920898438,
"learning_rate": 9.316282404787871e-05,
"loss": 16.1579,
"step": 42
},
{
"epoch": 0.6370370370370371,
"grad_norm": 200.22567749023438,
"learning_rate": 9.273963562927695e-05,
"loss": 15.8526,
"step": 43
},
{
"epoch": 0.6518518518518519,
"grad_norm": 178.33843994140625,
"learning_rate": 9.230476262104677e-05,
"loss": 16.288,
"step": 44
},
{
"epoch": 0.6666666666666666,
"grad_norm": 202.38644409179688,
"learning_rate": 9.185832391312644e-05,
"loss": 15.792,
"step": 45
},
{
"epoch": 0.6814814814814815,
"grad_norm": 233.50157165527344,
"learning_rate": 9.140044155740101e-05,
"loss": 15.8088,
"step": 46
},
{
"epoch": 0.6962962962962963,
"grad_norm": 224.35816955566406,
"learning_rate": 9.093124073433463e-05,
"loss": 16.4583,
"step": 47
},
{
"epoch": 0.7111111111111111,
"grad_norm": 279.59478759765625,
"learning_rate": 9.045084971874738e-05,
"loss": 16.9576,
"step": 48
},
{
"epoch": 0.725925925925926,
"grad_norm": 174.54611206054688,
"learning_rate": 8.995939984474624e-05,
"loss": 14.558,
"step": 49
},
{
"epoch": 0.7407407407407407,
"grad_norm": 160.77366638183594,
"learning_rate": 8.945702546981969e-05,
"loss": 15.3174,
"step": 50
},
{
"epoch": 0.7407407407407407,
"eval_loss": 3.9408531188964844,
"eval_runtime": 1.3328,
"eval_samples_per_second": 85.533,
"eval_steps_per_second": 21.758,
"step": 50
},
{
"epoch": 0.7555555555555555,
"grad_norm": 188.16419982910156,
"learning_rate": 8.894386393810563e-05,
"loss": 15.3985,
"step": 51
},
{
"epoch": 0.7703703703703704,
"grad_norm": 231.71327209472656,
"learning_rate": 8.842005554284296e-05,
"loss": 15.2165,
"step": 52
},
{
"epoch": 0.7851851851851852,
"grad_norm": 173.91954040527344,
"learning_rate": 8.788574348801675e-05,
"loss": 15.782,
"step": 53
},
{
"epoch": 0.8,
"grad_norm": 136.92684936523438,
"learning_rate": 8.73410738492077e-05,
"loss": 15.1911,
"step": 54
},
{
"epoch": 0.8148148148148148,
"grad_norm": 155.1068115234375,
"learning_rate": 8.678619553365659e-05,
"loss": 15.0718,
"step": 55
},
{
"epoch": 0.8296296296296296,
"grad_norm": 126.05249786376953,
"learning_rate": 8.622126023955446e-05,
"loss": 15.0598,
"step": 56
},
{
"epoch": 0.8444444444444444,
"grad_norm": 124.6325912475586,
"learning_rate": 8.564642241456986e-05,
"loss": 15.3749,
"step": 57
},
{
"epoch": 0.8592592592592593,
"grad_norm": 99.86504364013672,
"learning_rate": 8.506183921362443e-05,
"loss": 15.536,
"step": 58
},
{
"epoch": 0.8740740740740741,
"grad_norm": 145.73414611816406,
"learning_rate": 8.44676704559283e-05,
"loss": 15.4855,
"step": 59
},
{
"epoch": 0.8888888888888888,
"grad_norm": 107.8541030883789,
"learning_rate": 8.386407858128706e-05,
"loss": 15.3964,
"step": 60
},
{
"epoch": 0.9037037037037037,
"grad_norm": 129.00839233398438,
"learning_rate": 8.32512286056924e-05,
"loss": 14.8913,
"step": 61
},
{
"epoch": 0.9185185185185185,
"grad_norm": 120.9074935913086,
"learning_rate": 8.262928807620843e-05,
"loss": 15.4931,
"step": 62
},
{
"epoch": 0.9333333333333333,
"grad_norm": 155.10159301757812,
"learning_rate": 8.199842702516583e-05,
"loss": 14.9525,
"step": 63
},
{
"epoch": 0.9481481481481482,
"grad_norm": 104.62696838378906,
"learning_rate": 8.135881792367686e-05,
"loss": 15.1854,
"step": 64
},
{
"epoch": 0.9629629629629629,
"grad_norm": 117.71562957763672,
"learning_rate": 8.07106356344834e-05,
"loss": 15.1467,
"step": 65
},
{
"epoch": 0.9777777777777777,
"grad_norm": 107.33856964111328,
"learning_rate": 8.005405736415126e-05,
"loss": 14.967,
"step": 66
},
{
"epoch": 0.9925925925925926,
"grad_norm": 112.39717864990234,
"learning_rate": 7.938926261462366e-05,
"loss": 15.4031,
"step": 67
},
{
"epoch": 1.0074074074074073,
"grad_norm": 141.64866638183594,
"learning_rate": 7.871643313414718e-05,
"loss": 15.2419,
"step": 68
},
{
"epoch": 1.0222222222222221,
"grad_norm": 121.95929718017578,
"learning_rate": 7.803575286758364e-05,
"loss": 14.615,
"step": 69
},
{
"epoch": 1.037037037037037,
"grad_norm": 144.60166931152344,
"learning_rate": 7.734740790612136e-05,
"loss": 14.6254,
"step": 70
},
{
"epoch": 1.0518518518518518,
"grad_norm": 140.1562042236328,
"learning_rate": 7.66515864363997e-05,
"loss": 14.7595,
"step": 71
},
{
"epoch": 1.0666666666666667,
"grad_norm": 126.63314056396484,
"learning_rate": 7.594847868906076e-05,
"loss": 14.7871,
"step": 72
},
{
"epoch": 1.0814814814814815,
"grad_norm": 159.34571838378906,
"learning_rate": 7.52382768867422e-05,
"loss": 14.0148,
"step": 73
},
{
"epoch": 1.0962962962962963,
"grad_norm": 203.79522705078125,
"learning_rate": 7.452117519152542e-05,
"loss": 14.8249,
"step": 74
},
{
"epoch": 1.1111111111111112,
"grad_norm": 240.75645446777344,
"learning_rate": 7.379736965185368e-05,
"loss": 14.2129,
"step": 75
},
{
"epoch": 1.125925925925926,
"grad_norm": 140.5721893310547,
"learning_rate": 7.30670581489344e-05,
"loss": 14.9591,
"step": 76
},
{
"epoch": 1.1407407407407408,
"grad_norm": 129.59762573242188,
"learning_rate": 7.233044034264034e-05,
"loss": 14.5692,
"step": 77
},
{
"epoch": 1.1555555555555554,
"grad_norm": 138.79296875,
"learning_rate": 7.158771761692464e-05,
"loss": 14.6784,
"step": 78
},
{
"epoch": 1.1703703703703703,
"grad_norm": 152.30995178222656,
"learning_rate": 7.083909302476453e-05,
"loss": 14.4456,
"step": 79
},
{
"epoch": 1.1851851851851851,
"grad_norm": 118.80451202392578,
"learning_rate": 7.008477123264848e-05,
"loss": 14.408,
"step": 80
},
{
"epoch": 1.2,
"grad_norm": 172.7019500732422,
"learning_rate": 6.932495846462261e-05,
"loss": 14.3868,
"step": 81
},
{
"epoch": 1.2148148148148148,
"grad_norm": 132.16468811035156,
"learning_rate": 6.855986244591104e-05,
"loss": 15.2983,
"step": 82
},
{
"epoch": 1.2296296296296296,
"grad_norm": 145.1970672607422,
"learning_rate": 6.778969234612584e-05,
"loss": 15.4357,
"step": 83
},
{
"epoch": 1.2444444444444445,
"grad_norm": 124.29164123535156,
"learning_rate": 6.701465872208216e-05,
"loss": 14.2407,
"step": 84
},
{
"epoch": 1.2592592592592593,
"grad_norm": 127.04141235351562,
"learning_rate": 6.623497346023418e-05,
"loss": 13.8837,
"step": 85
},
{
"epoch": 1.2740740740740741,
"grad_norm": 107.40375518798828,
"learning_rate": 6.545084971874738e-05,
"loss": 14.5091,
"step": 86
},
{
"epoch": 1.2888888888888888,
"grad_norm": 136.55715942382812,
"learning_rate": 6.466250186922325e-05,
"loss": 14.5735,
"step": 87
},
{
"epoch": 1.3037037037037038,
"grad_norm": 120.40228271484375,
"learning_rate": 6.387014543809223e-05,
"loss": 15.0006,
"step": 88
},
{
"epoch": 1.3185185185185184,
"grad_norm": 144.55484008789062,
"learning_rate": 6.307399704769099e-05,
"loss": 14.8894,
"step": 89
},
{
"epoch": 1.3333333333333333,
"grad_norm": 103.71250915527344,
"learning_rate": 6.227427435703997e-05,
"loss": 15.0077,
"step": 90
},
{
"epoch": 1.348148148148148,
"grad_norm": 102.85832977294922,
"learning_rate": 6.147119600233758e-05,
"loss": 14.5431,
"step": 91
},
{
"epoch": 1.362962962962963,
"grad_norm": 96.97740936279297,
"learning_rate": 6.066498153718735e-05,
"loss": 14.2197,
"step": 92
},
{
"epoch": 1.3777777777777778,
"grad_norm": 94.78760528564453,
"learning_rate": 5.985585137257401e-05,
"loss": 14.8501,
"step": 93
},
{
"epoch": 1.3925925925925926,
"grad_norm": 112.6998291015625,
"learning_rate": 5.90440267166055e-05,
"loss": 14.511,
"step": 94
},
{
"epoch": 1.4074074074074074,
"grad_norm": 115.46554565429688,
"learning_rate": 5.8229729514036705e-05,
"loss": 15.395,
"step": 95
},
{
"epoch": 1.4222222222222223,
"grad_norm": 136.27862548828125,
"learning_rate": 5.74131823855921e-05,
"loss": 14.743,
"step": 96
},
{
"epoch": 1.4370370370370371,
"grad_norm": 141.50924682617188,
"learning_rate": 5.6594608567103456e-05,
"loss": 14.578,
"step": 97
},
{
"epoch": 1.4518518518518517,
"grad_norm": 148.98236083984375,
"learning_rate": 5.577423184847932e-05,
"loss": 15.4565,
"step": 98
},
{
"epoch": 1.4666666666666668,
"grad_norm": 129.44525146484375,
"learning_rate": 5.495227651252315e-05,
"loss": 15.646,
"step": 99
},
{
"epoch": 1.4814814814814814,
"grad_norm": 136.50833129882812,
"learning_rate": 5.4128967273616625e-05,
"loss": 14.8919,
"step": 100
},
{
"epoch": 1.4814814814814814,
"eval_loss": 3.7402379512786865,
"eval_runtime": 1.3308,
"eval_samples_per_second": 85.662,
"eval_steps_per_second": 21.791,
"step": 100
},
{
"epoch": 1.4962962962962962,
"grad_norm": 102.5388412475586,
"learning_rate": 5.330452921628497e-05,
"loss": 14.9052,
"step": 101
},
{
"epoch": 1.511111111111111,
"grad_norm": 113.43592071533203,
"learning_rate": 5.247918773366112e-05,
"loss": 14.3789,
"step": 102
},
{
"epoch": 1.525925925925926,
"grad_norm": 91.35191345214844,
"learning_rate": 5.165316846586541e-05,
"loss": 14.427,
"step": 103
},
{
"epoch": 1.5407407407407407,
"grad_norm": 99.83700561523438,
"learning_rate": 5.0826697238317935e-05,
"loss": 14.2309,
"step": 104
},
{
"epoch": 1.5555555555555556,
"grad_norm": 84.13626098632812,
"learning_rate": 5e-05,
"loss": 14.0855,
"step": 105
},
{
"epoch": 1.5703703703703704,
"grad_norm": 101.99211120605469,
"learning_rate": 4.917330276168208e-05,
"loss": 14.439,
"step": 106
},
{
"epoch": 1.585185185185185,
"grad_norm": 104.00724029541016,
"learning_rate": 4.834683153413459e-05,
"loss": 14.1921,
"step": 107
},
{
"epoch": 1.6,
"grad_norm": 102.696533203125,
"learning_rate": 4.7520812266338885e-05,
"loss": 14.504,
"step": 108
},
{
"epoch": 1.6148148148148147,
"grad_norm": 140.89016723632812,
"learning_rate": 4.669547078371504e-05,
"loss": 14.9026,
"step": 109
},
{
"epoch": 1.6296296296296298,
"grad_norm": 114.30033874511719,
"learning_rate": 4.5871032726383386e-05,
"loss": 14.743,
"step": 110
},
{
"epoch": 1.6444444444444444,
"grad_norm": 142.01661682128906,
"learning_rate": 4.504772348747687e-05,
"loss": 15.1088,
"step": 111
},
{
"epoch": 1.6592592592592592,
"grad_norm": 125.36564636230469,
"learning_rate": 4.4225768151520694e-05,
"loss": 15.057,
"step": 112
},
{
"epoch": 1.674074074074074,
"grad_norm": 105.30064392089844,
"learning_rate": 4.3405391432896555e-05,
"loss": 14.48,
"step": 113
},
{
"epoch": 1.6888888888888889,
"grad_norm": 148.7479248046875,
"learning_rate": 4.2586817614407895e-05,
"loss": 14.541,
"step": 114
},
{
"epoch": 1.7037037037037037,
"grad_norm": 145.95252990722656,
"learning_rate": 4.17702704859633e-05,
"loss": 13.9083,
"step": 115
},
{
"epoch": 1.7185185185185186,
"grad_norm": 140.0341796875,
"learning_rate": 4.095597328339452e-05,
"loss": 14.2814,
"step": 116
},
{
"epoch": 1.7333333333333334,
"grad_norm": 110.64568328857422,
"learning_rate": 4.0144148627425993e-05,
"loss": 14.6479,
"step": 117
},
{
"epoch": 1.748148148148148,
"grad_norm": 114.68722534179688,
"learning_rate": 3.933501846281267e-05,
"loss": 14.9445,
"step": 118
},
{
"epoch": 1.762962962962963,
"grad_norm": 121.93843078613281,
"learning_rate": 3.852880399766243e-05,
"loss": 14.5333,
"step": 119
},
{
"epoch": 1.7777777777777777,
"grad_norm": 139.56668090820312,
"learning_rate": 3.772572564296005e-05,
"loss": 15.3822,
"step": 120
},
{
"epoch": 1.7925925925925927,
"grad_norm": 156.67764282226562,
"learning_rate": 3.6926002952309016e-05,
"loss": 14.5472,
"step": 121
},
{
"epoch": 1.8074074074074074,
"grad_norm": 145.6569366455078,
"learning_rate": 3.612985456190778e-05,
"loss": 15.3665,
"step": 122
},
{
"epoch": 1.8222222222222222,
"grad_norm": 150.45404052734375,
"learning_rate": 3.533749813077677e-05,
"loss": 14.5629,
"step": 123
},
{
"epoch": 1.837037037037037,
"grad_norm": 173.68316650390625,
"learning_rate": 3.4549150281252636e-05,
"loss": 14.0435,
"step": 124
},
{
"epoch": 1.8518518518518519,
"grad_norm": 192.74942016601562,
"learning_rate": 3.3765026539765834e-05,
"loss": 14.9827,
"step": 125
},
{
"epoch": 1.8666666666666667,
"grad_norm": 181.27590942382812,
"learning_rate": 3.298534127791785e-05,
"loss": 14.5132,
"step": 126
},
{
"epoch": 1.8814814814814815,
"grad_norm": 174.27249145507812,
"learning_rate": 3.221030765387417e-05,
"loss": 14.9845,
"step": 127
},
{
"epoch": 1.8962962962962964,
"grad_norm": 198.6359100341797,
"learning_rate": 3.144013755408895e-05,
"loss": 13.6079,
"step": 128
},
{
"epoch": 1.911111111111111,
"grad_norm": 200.7653045654297,
"learning_rate": 3.0675041535377405e-05,
"loss": 14.8407,
"step": 129
},
{
"epoch": 1.925925925925926,
"grad_norm": 184.41754150390625,
"learning_rate": 2.991522876735154e-05,
"loss": 14.2502,
"step": 130
},
{
"epoch": 1.9407407407407407,
"grad_norm": 227.7645721435547,
"learning_rate": 2.916090697523549e-05,
"loss": 14.3757,
"step": 131
},
{
"epoch": 1.9555555555555557,
"grad_norm": 157.26063537597656,
"learning_rate": 2.8412282383075363e-05,
"loss": 14.7581,
"step": 132
},
{
"epoch": 1.9703703703703703,
"grad_norm": 157.49798583984375,
"learning_rate": 2.766955965735968e-05,
"loss": 14.1769,
"step": 133
},
{
"epoch": 1.9851851851851852,
"grad_norm": 170.1768341064453,
"learning_rate": 2.693294185106562e-05,
"loss": 14.5068,
"step": 134
},
{
"epoch": 2.0,
"grad_norm": 219.3101806640625,
"learning_rate": 2.6202630348146324e-05,
"loss": 13.9866,
"step": 135
},
{
"epoch": 2.0148148148148146,
"grad_norm": 135.1683349609375,
"learning_rate": 2.547882480847461e-05,
"loss": 13.996,
"step": 136
},
{
"epoch": 2.0296296296296297,
"grad_norm": 147.524658203125,
"learning_rate": 2.476172311325783e-05,
"loss": 14.2326,
"step": 137
},
{
"epoch": 2.0444444444444443,
"grad_norm": 126.68768310546875,
"learning_rate": 2.405152131093926e-05,
"loss": 14.4419,
"step": 138
},
{
"epoch": 2.0592592592592593,
"grad_norm": 138.6258544921875,
"learning_rate": 2.3348413563600325e-05,
"loss": 14.6305,
"step": 139
},
{
"epoch": 2.074074074074074,
"grad_norm": 152.36740112304688,
"learning_rate": 2.2652592093878666e-05,
"loss": 14.3344,
"step": 140
},
{
"epoch": 2.088888888888889,
"grad_norm": 227.44227600097656,
"learning_rate": 2.196424713241637e-05,
"loss": 13.9444,
"step": 141
},
{
"epoch": 2.1037037037037036,
"grad_norm": 162.27259826660156,
"learning_rate": 2.128356686585282e-05,
"loss": 14.0645,
"step": 142
},
{
"epoch": 2.1185185185185187,
"grad_norm": 161.71438598632812,
"learning_rate": 2.061073738537635e-05,
"loss": 14.2054,
"step": 143
},
{
"epoch": 2.1333333333333333,
"grad_norm": 140.36068725585938,
"learning_rate": 1.9945942635848748e-05,
"loss": 14.1545,
"step": 144
},
{
"epoch": 2.148148148148148,
"grad_norm": 182.17013549804688,
"learning_rate": 1.928936436551661e-05,
"loss": 14.4722,
"step": 145
},
{
"epoch": 2.162962962962963,
"grad_norm": 150.6356964111328,
"learning_rate": 1.8641182076323148e-05,
"loss": 13.8913,
"step": 146
},
{
"epoch": 2.1777777777777776,
"grad_norm": 181.77224731445312,
"learning_rate": 1.800157297483417e-05,
"loss": 14.2508,
"step": 147
},
{
"epoch": 2.1925925925925926,
"grad_norm": 159.9928741455078,
"learning_rate": 1.7370711923791567e-05,
"loss": 14.8011,
"step": 148
},
{
"epoch": 2.2074074074074073,
"grad_norm": 198.1991424560547,
"learning_rate": 1.6748771394307585e-05,
"loss": 14.3488,
"step": 149
},
{
"epoch": 2.2222222222222223,
"grad_norm": 156.02816772460938,
"learning_rate": 1.6135921418712956e-05,
"loss": 14.9636,
"step": 150
},
{
"epoch": 2.2222222222222223,
"eval_loss": 3.6782333850860596,
"eval_runtime": 1.3381,
"eval_samples_per_second": 85.196,
"eval_steps_per_second": 21.673,
"step": 150
},
{
"epoch": 2.237037037037037,
"grad_norm": 203.73736572265625,
"learning_rate": 1.553232954407171e-05,
"loss": 14.9,
"step": 151
},
{
"epoch": 2.251851851851852,
"grad_norm": 153.0937957763672,
"learning_rate": 1.4938160786375572e-05,
"loss": 14.0142,
"step": 152
},
{
"epoch": 2.2666666666666666,
"grad_norm": 117.68968963623047,
"learning_rate": 1.435357758543015e-05,
"loss": 14.3636,
"step": 153
},
{
"epoch": 2.2814814814814817,
"grad_norm": 137.20506286621094,
"learning_rate": 1.3778739760445552e-05,
"loss": 13.7021,
"step": 154
},
{
"epoch": 2.2962962962962963,
"grad_norm": 161.4315185546875,
"learning_rate": 1.3213804466343421e-05,
"loss": 14.8398,
"step": 155
},
{
"epoch": 2.311111111111111,
"grad_norm": 162.87452697753906,
"learning_rate": 1.2658926150792322e-05,
"loss": 14.2438,
"step": 156
},
{
"epoch": 2.325925925925926,
"grad_norm": 255.86105346679688,
"learning_rate": 1.2114256511983274e-05,
"loss": 14.2575,
"step": 157
},
{
"epoch": 2.3407407407407406,
"grad_norm": 171.7278594970703,
"learning_rate": 1.157994445715706e-05,
"loss": 14.3891,
"step": 158
},
{
"epoch": 2.3555555555555556,
"grad_norm": 138.58694458007812,
"learning_rate": 1.1056136061894384e-05,
"loss": 14.4252,
"step": 159
},
{
"epoch": 2.3703703703703702,
"grad_norm": 131.3074188232422,
"learning_rate": 1.0542974530180327e-05,
"loss": 13.6398,
"step": 160
},
{
"epoch": 2.3851851851851853,
"grad_norm": 132.39199829101562,
"learning_rate": 1.0040600155253765e-05,
"loss": 13.8621,
"step": 161
},
{
"epoch": 2.4,
"grad_norm": 208.60899353027344,
"learning_rate": 9.549150281252633e-06,
"loss": 13.7883,
"step": 162
},
{
"epoch": 2.414814814814815,
"grad_norm": 151.96185302734375,
"learning_rate": 9.068759265665384e-06,
"loss": 14.6574,
"step": 163
},
{
"epoch": 2.4296296296296296,
"grad_norm": 149.67416381835938,
"learning_rate": 8.599558442598998e-06,
"loss": 13.8391,
"step": 164
},
{
"epoch": 2.4444444444444446,
"grad_norm": 179.20135498046875,
"learning_rate": 8.141676086873572e-06,
"loss": 14.5998,
"step": 165
},
{
"epoch": 2.4592592592592593,
"grad_norm": 240.76341247558594,
"learning_rate": 7.695237378953223e-06,
"loss": 14.8283,
"step": 166
},
{
"epoch": 2.474074074074074,
"grad_norm": 177.29092407226562,
"learning_rate": 7.260364370723044e-06,
"loss": 13.991,
"step": 167
},
{
"epoch": 2.488888888888889,
"grad_norm": 112.82994842529297,
"learning_rate": 6.837175952121306e-06,
"loss": 13.6775,
"step": 168
},
{
"epoch": 2.5037037037037035,
"grad_norm": 134.5693817138672,
"learning_rate": 6.425787818636131e-06,
"loss": 14.0775,
"step": 169
},
{
"epoch": 2.5185185185185186,
"grad_norm": 116.91983795166016,
"learning_rate": 6.026312439675552e-06,
"loss": 13.7677,
"step": 170
},
{
"epoch": 2.533333333333333,
"grad_norm": 164.41635131835938,
"learning_rate": 5.6388590278194096e-06,
"loss": 14.3629,
"step": 171
},
{
"epoch": 2.5481481481481483,
"grad_norm": 159.20077514648438,
"learning_rate": 5.263533508961827e-06,
"loss": 13.9835,
"step": 172
},
{
"epoch": 2.562962962962963,
"grad_norm": 119.64713287353516,
"learning_rate": 4.900438493352055e-06,
"loss": 14.1561,
"step": 173
},
{
"epoch": 2.5777777777777775,
"grad_norm": 166.39501953125,
"learning_rate": 4.549673247541875e-06,
"loss": 13.9771,
"step": 174
},
{
"epoch": 2.5925925925925926,
"grad_norm": 158.79327392578125,
"learning_rate": 4.2113336672471245e-06,
"loss": 13.909,
"step": 175
},
{
"epoch": 2.6074074074074076,
"grad_norm": 170.07273864746094,
"learning_rate": 3.885512251130763e-06,
"loss": 14.2716,
"step": 176
},
{
"epoch": 2.6222222222222222,
"grad_norm": 175.40963745117188,
"learning_rate": 3.5722980755146517e-06,
"loss": 13.8635,
"step": 177
},
{
"epoch": 2.637037037037037,
"grad_norm": 165.8638458251953,
"learning_rate": 3.271776770026963e-06,
"loss": 15.226,
"step": 178
},
{
"epoch": 2.651851851851852,
"grad_norm": 156.8959197998047,
"learning_rate": 2.9840304941919415e-06,
"loss": 13.9328,
"step": 179
},
{
"epoch": 2.6666666666666665,
"grad_norm": 137.80484008789062,
"learning_rate": 2.7091379149682685e-06,
"loss": 13.5431,
"step": 180
},
{
"epoch": 2.6814814814814816,
"grad_norm": 157.63360595703125,
"learning_rate": 2.4471741852423237e-06,
"loss": 14.3916,
"step": 181
},
{
"epoch": 2.696296296296296,
"grad_norm": 170.91567993164062,
"learning_rate": 2.1982109232821178e-06,
"loss": 13.752,
"step": 182
},
{
"epoch": 2.7111111111111112,
"grad_norm": 174.46258544921875,
"learning_rate": 1.962316193157593e-06,
"loss": 13.6756,
"step": 183
},
{
"epoch": 2.725925925925926,
"grad_norm": 123.57269287109375,
"learning_rate": 1.7395544861325718e-06,
"loss": 13.3758,
"step": 184
},
{
"epoch": 2.7407407407407405,
"grad_norm": 113.82854461669922,
"learning_rate": 1.5299867030334814e-06,
"loss": 14.2526,
"step": 185
},
{
"epoch": 2.7555555555555555,
"grad_norm": 147.1977996826172,
"learning_rate": 1.333670137599713e-06,
"loss": 14.2643,
"step": 186
},
{
"epoch": 2.7703703703703706,
"grad_norm": 134.13990783691406,
"learning_rate": 1.1506584608200367e-06,
"loss": 13.8424,
"step": 187
},
{
"epoch": 2.785185185185185,
"grad_norm": 136.7030029296875,
"learning_rate": 9.810017062595322e-07,
"loss": 14.0423,
"step": 188
},
{
"epoch": 2.8,
"grad_norm": 158.4160614013672,
"learning_rate": 8.247462563808817e-07,
"loss": 13.6131,
"step": 189
},
{
"epoch": 2.814814814814815,
"grad_norm": 151.19692993164062,
"learning_rate": 6.819348298638839e-07,
"loss": 14.2082,
"step": 190
},
{
"epoch": 2.8296296296296295,
"grad_norm": 201.72494506835938,
"learning_rate": 5.526064699265753e-07,
"loss": 13.4682,
"step": 191
},
{
"epoch": 2.8444444444444446,
"grad_norm": 144.17710876464844,
"learning_rate": 4.367965336512403e-07,
"loss": 14.1444,
"step": 192
},
{
"epoch": 2.859259259259259,
"grad_norm": 149.2476806640625,
"learning_rate": 3.3453668231809286e-07,
"loss": 14.1861,
"step": 193
},
{
"epoch": 2.8740740740740742,
"grad_norm": 154.860107421875,
"learning_rate": 2.458548727494292e-07,
"loss": 14.5229,
"step": 194
},
{
"epoch": 2.888888888888889,
"grad_norm": 136.7914276123047,
"learning_rate": 1.7077534966650766e-07,
"loss": 14.2759,
"step": 195
},
{
"epoch": 2.9037037037037035,
"grad_norm": 183.71226501464844,
"learning_rate": 1.0931863906127327e-07,
"loss": 13.7139,
"step": 196
},
{
"epoch": 2.9185185185185185,
"grad_norm": 169.7905731201172,
"learning_rate": 6.150154258476315e-08,
"loss": 13.6161,
"step": 197
},
{
"epoch": 2.9333333333333336,
"grad_norm": 161.94277954101562,
"learning_rate": 2.7337132953697554e-08,
"loss": 14.7131,
"step": 198
},
{
"epoch": 2.948148148148148,
"grad_norm": 203.21755981445312,
"learning_rate": 6.834750376549792e-09,
"loss": 13.5579,
"step": 199
},
{
"epoch": 2.962962962962963,
"grad_norm": 139.2266387939453,
"learning_rate": 0.0,
"loss": 14.1591,
"step": 200
},
{
"epoch": 2.962962962962963,
"eval_loss": 3.651360034942627,
"eval_runtime": 1.3259,
"eval_samples_per_second": 85.981,
"eval_steps_per_second": 21.872,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4874493864378368e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
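
A minimal sketch for inspecting a checkpoint state like the one above, assuming it is saved in the usual transformers layout as trainer_state.json inside the checkpoint directory (the path below mirrors best_model_checkpoint and is otherwise hypothetical):

```python
import json

# Hypothetical path, mirroring "best_model_checkpoint" above.
STATE_PATH = "miner_id_24/checkpoint-200/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries in log_history carry a "loss" key;
# evaluation entries carry "eval_loss" instead.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best metric {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
print(f"{len(train_log)} training steps logged, eval every {state['eval_steps']} steps:")
for e in eval_log:
    print(f"  step {e['step']:>3}: eval_loss {e['eval_loss']:.4f}")
```

Run against the state above, this reports eval_loss falling monotonically from 4.4508 at step 1 to 3.6514 at step 200, matching best_metric.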
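
The logged learning_rate values trace a linear warmup over the first 10 steps (1e-05 up to 1e-04) followed by a half-cosine decay to 0.0 at step 200, the shape that transformers' get_cosine_schedule_with_warmup produces with num_warmup_steps=10 and num_training_steps=200. The warmup length is inferred from the log, not stored in the file. A sketch of that formula:

```python
import math

BASE_LR = 1e-4   # peak learning rate, reached at step 10
WARMUP = 10      # inferred: lr climbs linearly over steps 1-10
TOTAL = 200      # "max_steps": 200

def lr_at(step: int) -> float:
    """Linear warmup, then half-cosine decay to zero."""
    if step < WARMUP:
        return BASE_LR * step / WARMUP
    progress = (step - WARMUP) / (TOTAL - WARMUP)
    return BASE_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot checks against the log above:
assert abs(lr_at(11) - 9.999316524962345e-05) < 1e-12
assert lr_at(105) == 5e-05   # midpoint of the cosine phase
assert lr_at(200) == 0.0     # decayed to zero at max_steps
```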
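
The remaining fields map onto a standard transformers setup. A hedged sketch of the corresponding configuration, using only values present in or inferred from the file (output_dir is guessed from best_model_checkpoint, the scheduler and warmup settings from the learning-rate log; the evaluation/save strategy kwargs are omitted because their names vary across transformers versions):

```python
from transformers import EarlyStoppingCallback, TrainingArguments

args = TrainingArguments(
    output_dir="miner_id_24",       # guessed from "best_model_checkpoint"
    max_steps=200,                  # "max_steps": 200
    per_device_train_batch_size=8,  # "train_batch_size": 8
    learning_rate=1e-4,             # peak lr reached after warmup
    warmup_steps=10,                # inferred from the learning-rate log
    lr_scheduler_type="cosine",     # inferred from the learning-rate log
    logging_steps=1,                # "logging_steps": 1
    eval_steps=50,                  # "eval_steps": 50
    save_steps=50,                  # "save_steps": 50
)

# Mirrors the EarlyStoppingCallback block under "stateful_callbacks":
early_stop = EarlyStoppingCallback(
    early_stopping_patience=5,
    early_stopping_threshold=0.0,
)
```

Note that training stopped via should_training_stop at step 200 because max_steps was reached, not through early stopping: early_stopping_patience_counter is 0 and eval_loss was still improving at the final checkpoint.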