{
  "best_metric": 1.0066642761230469,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.09090909090909091,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018181818181818182,
      "grad_norm": 138.68397521972656,
      "learning_rate": 5e-06,
      "loss": 14.5689,
      "step": 1
    },
    {
      "epoch": 0.0018181818181818182,
      "eval_loss": 4.257202625274658,
      "eval_runtime": 15.5048,
      "eval_samples_per_second": 59.788,
      "eval_steps_per_second": 29.926,
      "step": 1
    },
    {
      "epoch": 0.0036363636363636364,
      "grad_norm": 122.03857421875,
      "learning_rate": 1e-05,
      "loss": 15.5416,
      "step": 2
    },
    {
      "epoch": 0.005454545454545455,
      "grad_norm": 139.68202209472656,
      "learning_rate": 1.5e-05,
      "loss": 16.1754,
      "step": 3
    },
    {
      "epoch": 0.007272727272727273,
      "grad_norm": 154.2438507080078,
      "learning_rate": 2e-05,
      "loss": 15.2385,
      "step": 4
    },
    {
      "epoch": 0.00909090909090909,
      "grad_norm": 146.93878173828125,
      "learning_rate": 2.5e-05,
      "loss": 15.5613,
      "step": 5
    },
    {
      "epoch": 0.01090909090909091,
      "grad_norm": 120.0749282836914,
      "learning_rate": 3e-05,
      "loss": 14.4842,
      "step": 6
    },
    {
      "epoch": 0.012727272727272728,
      "grad_norm": 126.85943603515625,
      "learning_rate": 3.5e-05,
      "loss": 13.4979,
      "step": 7
    },
    {
      "epoch": 0.014545454545454545,
      "grad_norm": 175.2422637939453,
      "learning_rate": 4e-05,
      "loss": 12.2678,
      "step": 8
    },
    {
      "epoch": 0.016363636363636365,
      "grad_norm": 129.5550537109375,
      "learning_rate": 4.5e-05,
      "loss": 11.7683,
      "step": 9
    },
    {
      "epoch": 0.01818181818181818,
      "grad_norm": 114.66398620605469,
      "learning_rate": 5e-05,
      "loss": 10.7645,
      "step": 10
    },
    {
      "epoch": 0.02,
      "grad_norm": 109.88460540771484,
      "learning_rate": 5.500000000000001e-05,
      "loss": 9.8579,
      "step": 11
    },
    {
      "epoch": 0.02181818181818182,
      "grad_norm": 122.863037109375,
      "learning_rate": 6e-05,
      "loss": 9.304,
      "step": 12
    },
    {
      "epoch": 0.023636363636363636,
      "grad_norm": 123.73020935058594,
      "learning_rate": 6.500000000000001e-05,
      "loss": 8.3192,
      "step": 13
    },
    {
      "epoch": 0.025454545454545455,
      "grad_norm": 147.00076293945312,
      "learning_rate": 7e-05,
      "loss": 7.907,
      "step": 14
    },
    {
      "epoch": 0.02727272727272727,
      "grad_norm": 338.5273132324219,
      "learning_rate": 7.500000000000001e-05,
      "loss": 8.9278,
      "step": 15
    },
    {
      "epoch": 0.02909090909090909,
      "grad_norm": 143.44017028808594,
      "learning_rate": 8e-05,
      "loss": 7.7997,
      "step": 16
    },
    {
      "epoch": 0.03090909090909091,
      "grad_norm": 175.75901794433594,
      "learning_rate": 8.5e-05,
      "loss": 7.0567,
      "step": 17
    },
    {
      "epoch": 0.03272727272727273,
      "grad_norm": 161.01739501953125,
      "learning_rate": 9e-05,
      "loss": 7.9452,
      "step": 18
    },
    {
      "epoch": 0.034545454545454546,
      "grad_norm": 217.87303161621094,
      "learning_rate": 9.5e-05,
      "loss": 8.277,
      "step": 19
    },
    {
      "epoch": 0.03636363636363636,
      "grad_norm": 193.74627685546875,
      "learning_rate": 0.0001,
      "loss": 7.6536,
      "step": 20
    },
    {
      "epoch": 0.038181818181818185,
      "grad_norm": 253.91064453125,
      "learning_rate": 9.999238475781957e-05,
      "loss": 6.6774,
      "step": 21
    },
    {
      "epoch": 0.04,
      "grad_norm": 213.30625915527344,
      "learning_rate": 9.99695413509548e-05,
      "loss": 6.5032,
      "step": 22
    },
    {
      "epoch": 0.04181818181818182,
      "grad_norm": 161.59420776367188,
      "learning_rate": 9.99314767377287e-05,
      "loss": 6.4562,
      "step": 23
    },
    {
      "epoch": 0.04363636363636364,
      "grad_norm": 163.30369567871094,
      "learning_rate": 9.987820251299122e-05,
      "loss": 6.051,
      "step": 24
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 164.57496643066406,
      "learning_rate": 9.980973490458728e-05,
      "loss": 5.5738,
      "step": 25
    },
    {
      "epoch": 0.04727272727272727,
      "grad_norm": 137.8650360107422,
      "learning_rate": 9.972609476841367e-05,
      "loss": 5.9991,
      "step": 26
    },
    {
      "epoch": 0.04909090909090909,
      "grad_norm": 147.88990783691406,
      "learning_rate": 9.962730758206611e-05,
      "loss": 5.2916,
      "step": 27
    },
    {
      "epoch": 0.05090909090909091,
      "grad_norm": 175.40298461914062,
      "learning_rate": 9.951340343707852e-05,
      "loss": 5.123,
      "step": 28
    },
    {
      "epoch": 0.05272727272727273,
      "grad_norm": 143.73516845703125,
      "learning_rate": 9.938441702975689e-05,
      "loss": 4.696,
      "step": 29
    },
    {
      "epoch": 0.05454545454545454,
      "grad_norm": 162.42837524414062,
      "learning_rate": 9.924038765061042e-05,
      "loss": 4.4559,
      "step": 30
    },
    {
      "epoch": 0.056363636363636366,
      "grad_norm": 138.40577697753906,
      "learning_rate": 9.908135917238321e-05,
      "loss": 5.1119,
      "step": 31
    },
    {
      "epoch": 0.05818181818181818,
      "grad_norm": 152.168212890625,
      "learning_rate": 9.890738003669029e-05,
      "loss": 4.7344,
      "step": 32
    },
    {
      "epoch": 0.06,
      "grad_norm": 221.61441040039062,
      "learning_rate": 9.871850323926177e-05,
      "loss": 4.8725,
      "step": 33
    },
    {
      "epoch": 0.06181818181818182,
      "grad_norm": 134.0053253173828,
      "learning_rate": 9.851478631379982e-05,
      "loss": 4.1396,
      "step": 34
    },
    {
      "epoch": 0.06363636363636363,
      "grad_norm": 169.15768432617188,
      "learning_rate": 9.829629131445342e-05,
      "loss": 4.7024,
      "step": 35
    },
    {
      "epoch": 0.06545454545454546,
      "grad_norm": 295.5130310058594,
      "learning_rate": 9.806308479691595e-05,
      "loss": 4.3401,
      "step": 36
    },
    {
      "epoch": 0.06727272727272728,
      "grad_norm": 170.70733642578125,
      "learning_rate": 9.781523779815179e-05,
      "loss": 5.3419,
      "step": 37
    },
    {
      "epoch": 0.06909090909090909,
      "grad_norm": 150.62545776367188,
      "learning_rate": 9.755282581475769e-05,
      "loss": 4.5806,
      "step": 38
    },
    {
      "epoch": 0.07090909090909091,
      "grad_norm": 159.96763610839844,
      "learning_rate": 9.727592877996585e-05,
      "loss": 4.1185,
      "step": 39
    },
    {
      "epoch": 0.07272727272727272,
      "grad_norm": 145.9441680908203,
      "learning_rate": 9.698463103929542e-05,
      "loss": 4.1673,
      "step": 40
    },
    {
      "epoch": 0.07454545454545454,
      "grad_norm": 107.2767333984375,
      "learning_rate": 9.667902132486009e-05,
      "loss": 4.7757,
      "step": 41
    },
    {
      "epoch": 0.07636363636363637,
      "grad_norm": 204.49026489257812,
      "learning_rate": 9.635919272833938e-05,
      "loss": 4.7596,
      "step": 42
    },
    {
      "epoch": 0.07818181818181819,
      "grad_norm": 115.9788818359375,
      "learning_rate": 9.602524267262203e-05,
      "loss": 5.3676,
      "step": 43
    },
    {
      "epoch": 0.08,
      "grad_norm": 70.77046966552734,
      "learning_rate": 9.567727288213005e-05,
      "loss": 3.8195,
      "step": 44
    },
    {
      "epoch": 0.08181818181818182,
      "grad_norm": 74.69246673583984,
      "learning_rate": 9.53153893518325e-05,
      "loss": 4.8097,
      "step": 45
    },
    {
      "epoch": 0.08363636363636363,
      "grad_norm": 90.72003173828125,
      "learning_rate": 9.493970231495835e-05,
      "loss": 5.028,
      "step": 46
    },
    {
      "epoch": 0.08545454545454545,
      "grad_norm": 85.53260803222656,
      "learning_rate": 9.45503262094184e-05,
      "loss": 4.1861,
      "step": 47
    },
    {
      "epoch": 0.08727272727272728,
      "grad_norm": 90.01811981201172,
      "learning_rate": 9.414737964294636e-05,
      "loss": 4.2923,
      "step": 48
    },
    {
      "epoch": 0.0890909090909091,
      "grad_norm": 103.90205383300781,
      "learning_rate": 9.373098535696979e-05,
      "loss": 4.0331,
      "step": 49
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 54.6790657043457,
      "learning_rate": 9.330127018922194e-05,
      "loss": 5.0269,
      "step": 50
    },
    {
      "epoch": 0.09090909090909091,
      "eval_loss": 1.0066642761230469,
      "eval_runtime": 15.4751,
      "eval_samples_per_second": 59.903,
      "eval_steps_per_second": 29.984,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3725608982937600.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}