{
  "best_metric": 0.8369658589363098,
  "best_model_checkpoint": "miner_id_24/checkpoint-200",
  "epoch": 0.4357298474945534,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002178649237472767,
      "eval_loss": 1.642710566520691,
      "eval_runtime": 11.0208,
      "eval_samples_per_second": 13.157,
      "eval_steps_per_second": 3.357,
      "step": 1
    },
    {
      "epoch": 0.02178649237472767,
      "grad_norm": 13.529788970947266,
      "learning_rate": 5.095e-06,
      "loss": 1.5568,
      "step": 10
    },
    {
      "epoch": 0.04357298474945534,
      "grad_norm": 11.73664379119873,
      "learning_rate": 1.019e-05,
      "loss": 1.3614,
      "step": 20
    },
    {
      "epoch": 0.06535947712418301,
      "grad_norm": 15.36020565032959,
      "learning_rate": 9.623888888888889e-06,
      "loss": 1.1538,
      "step": 30
    },
    {
      "epoch": 0.08714596949891068,
      "grad_norm": 16.68719482421875,
      "learning_rate": 9.057777777777777e-06,
      "loss": 0.9726,
      "step": 40
    },
    {
      "epoch": 0.10893246187363835,
      "grad_norm": 8.945978164672852,
      "learning_rate": 8.491666666666667e-06,
      "loss": 0.8609,
      "step": 50
    },
    {
      "epoch": 0.10893246187363835,
      "eval_loss": 1.0293898582458496,
      "eval_runtime": 11.263,
      "eval_samples_per_second": 12.874,
      "eval_steps_per_second": 3.285,
      "step": 50
    },
    {
      "epoch": 0.13071895424836602,
      "grad_norm": 8.820780754089355,
      "learning_rate": 7.925555555555557e-06,
      "loss": 1.1589,
      "step": 60
    },
    {
      "epoch": 0.15250544662309368,
      "grad_norm": 7.298919677734375,
      "learning_rate": 7.359444444444445e-06,
      "loss": 0.9633,
      "step": 70
    },
    {
      "epoch": 0.17429193899782136,
      "grad_norm": 6.904092311859131,
      "learning_rate": 6.793333333333333e-06,
      "loss": 0.8975,
      "step": 80
    },
    {
      "epoch": 0.19607843137254902,
      "grad_norm": 9.39885425567627,
      "learning_rate": 6.227222222222223e-06,
      "loss": 0.8465,
      "step": 90
    },
    {
      "epoch": 0.2178649237472767,
      "grad_norm": 9.127683639526367,
      "learning_rate": 5.661111111111112e-06,
      "loss": 0.751,
      "step": 100
    },
    {
      "epoch": 0.2178649237472767,
      "eval_loss": 0.9036096930503845,
      "eval_runtime": 11.2686,
      "eval_samples_per_second": 12.868,
      "eval_steps_per_second": 3.283,
      "step": 100
    },
    {
      "epoch": 0.23965141612200436,
      "grad_norm": 7.476004123687744,
      "learning_rate": 5.095e-06,
      "loss": 1.0529,
      "step": 110
    },
    {
      "epoch": 0.26143790849673204,
      "grad_norm": 7.453296184539795,
      "learning_rate": 4.5288888888888885e-06,
      "loss": 0.9474,
      "step": 120
    },
    {
      "epoch": 0.28322440087145967,
      "grad_norm": 8.616524696350098,
      "learning_rate": 3.9627777777777784e-06,
      "loss": 0.7555,
      "step": 130
    },
    {
      "epoch": 0.30501089324618735,
      "grad_norm": 9.001409530639648,
      "learning_rate": 3.3966666666666666e-06,
      "loss": 0.8331,
      "step": 140
    },
    {
      "epoch": 0.32679738562091504,
      "grad_norm": 9.718732833862305,
      "learning_rate": 2.830555555555556e-06,
      "loss": 0.7389,
      "step": 150
    },
    {
      "epoch": 0.32679738562091504,
      "eval_loss": 0.8491891622543335,
      "eval_runtime": 11.2774,
      "eval_samples_per_second": 12.858,
      "eval_steps_per_second": 3.281,
      "step": 150
    },
    {
      "epoch": 0.3485838779956427,
      "grad_norm": 6.21028995513916,
      "learning_rate": 2.2644444444444443e-06,
      "loss": 1.008,
      "step": 160
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 7.034985542297363,
      "learning_rate": 1.6983333333333333e-06,
      "loss": 0.8855,
      "step": 170
    },
    {
      "epoch": 0.39215686274509803,
      "grad_norm": 7.719512462615967,
      "learning_rate": 1.1322222222222221e-06,
      "loss": 0.8906,
      "step": 180
    },
    {
      "epoch": 0.4139433551198257,
      "grad_norm": 5.853115081787109,
      "learning_rate": 5.661111111111111e-07,
      "loss": 0.8076,
      "step": 190
    },
    {
      "epoch": 0.4357298474945534,
      "grad_norm": 12.761053085327148,
      "learning_rate": 0.0,
      "loss": 0.6258,
      "step": 200
    },
    {
      "epoch": 0.4357298474945534,
      "eval_loss": 0.8369658589363098,
      "eval_runtime": 11.2719,
      "eval_samples_per_second": 12.864,
      "eval_steps_per_second": 3.282,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.35744505488343e+16,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}