{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02321262766945218,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00046425255338904364,
      "eval_loss": 4.059295177459717,
      "eval_runtime": 20.2414,
      "eval_samples_per_second": 44.809,
      "eval_steps_per_second": 22.429,
      "step": 1
    },
    {
      "epoch": 0.002321262766945218,
      "grad_norm": 113.8918228149414,
      "learning_rate": 5e-05,
      "loss": 18.5564,
      "step": 5
    },
    {
      "epoch": 0.004642525533890436,
      "grad_norm": 107.09974670410156,
      "learning_rate": 0.0001,
      "loss": 16.0422,
      "step": 10
    },
    {
      "epoch": 0.004642525533890436,
      "eval_loss": 3.889435291290283,
      "eval_runtime": 18.1949,
      "eval_samples_per_second": 49.849,
      "eval_steps_per_second": 24.952,
      "step": 10
    },
    {
      "epoch": 0.006963788300835654,
      "grad_norm": 112.6549072265625,
      "learning_rate": 9.619397662556435e-05,
      "loss": 14.8815,
      "step": 15
    },
    {
      "epoch": 0.009285051067780872,
      "grad_norm": 109.76182556152344,
      "learning_rate": 8.535533905932738e-05,
      "loss": 13.0329,
      "step": 20
    },
    {
      "epoch": 0.009285051067780872,
      "eval_loss": 3.405557155609131,
      "eval_runtime": 18.2908,
      "eval_samples_per_second": 49.588,
      "eval_steps_per_second": 24.821,
      "step": 20
    },
    {
      "epoch": 0.01160631383472609,
      "grad_norm": 98.89693450927734,
      "learning_rate": 6.91341716182545e-05,
      "loss": 12.6359,
      "step": 25
    },
    {
      "epoch": 0.013927576601671309,
      "grad_norm": 95.26651000976562,
      "learning_rate": 5e-05,
      "loss": 12.3584,
      "step": 30
    },
    {
      "epoch": 0.013927576601671309,
      "eval_loss": 3.173269271850586,
      "eval_runtime": 18.1951,
      "eval_samples_per_second": 49.849,
      "eval_steps_per_second": 24.952,
      "step": 30
    },
    {
      "epoch": 0.016248839368616527,
      "grad_norm": 108.44436645507812,
      "learning_rate": 3.086582838174551e-05,
      "loss": 12.8901,
      "step": 35
    },
    {
      "epoch": 0.018570102135561744,
      "grad_norm": 110.787841796875,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 11.8677,
      "step": 40
    },
    {
      "epoch": 0.018570102135561744,
      "eval_loss": 3.076159954071045,
      "eval_runtime": 18.4863,
      "eval_samples_per_second": 49.063,
      "eval_steps_per_second": 24.559,
      "step": 40
    },
    {
      "epoch": 0.020891364902506964,
      "grad_norm": 124.56420135498047,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 12.2453,
      "step": 45
    },
    {
      "epoch": 0.02321262766945218,
      "grad_norm": 124.69161224365234,
      "learning_rate": 0.0,
      "loss": 12.4255,
      "step": 50
    },
    {
      "epoch": 0.02321262766945218,
      "eval_loss": 3.0605356693267822,
      "eval_runtime": 18.4891,
      "eval_samples_per_second": 49.056,
      "eval_steps_per_second": 24.555,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 440836043046912.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}