{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.987012987012987,
  "eval_steps": 500,
  "global_step": 19,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 0.17318853735923767,
      "learning_rate": 9.931806517013612e-06,
      "loss": 0.2793,
      "step": 1
    },
    {
      "epoch": 0.1038961038961039,
      "grad_norm": 0.16861183941364288,
      "learning_rate": 9.729086208503174e-06,
      "loss": 0.2526,
      "step": 2
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 0.20030151307582855,
      "learning_rate": 9.397368756032445e-06,
      "loss": 0.314,
      "step": 3
    },
    {
      "epoch": 0.2077922077922078,
      "grad_norm": 0.1905766725540161,
      "learning_rate": 8.94570254698197e-06,
      "loss": 0.3651,
      "step": 4
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 0.18305057287216187,
      "learning_rate": 8.386407858128707e-06,
      "loss": 0.2511,
      "step": 5
    },
    {
      "epoch": 0.3116883116883117,
      "grad_norm": 0.1884309947490692,
      "learning_rate": 7.734740790612137e-06,
      "loss": 0.3179,
      "step": 6
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.197285994887352,
      "learning_rate": 7.008477123264849e-06,
      "loss": 0.3045,
      "step": 7
    },
    {
      "epoch": 0.4155844155844156,
      "grad_norm": 0.2820747494697571,
      "learning_rate": 6.227427435703997e-06,
      "loss": 0.2897,
      "step": 8
    },
    {
      "epoch": 0.4675324675324675,
      "grad_norm": 0.17939220368862152,
      "learning_rate": 5.412896727361663e-06,
      "loss": 0.3061,
      "step": 9
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 0.18881358206272125,
      "learning_rate": 4.587103272638339e-06,
      "loss": 0.2938,
      "step": 10
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.18471910059452057,
      "learning_rate": 3.7725725642960047e-06,
      "loss": 0.2933,
      "step": 11
    },
    {
      "epoch": 0.6233766233766234,
      "grad_norm": 0.16619093716144562,
      "learning_rate": 2.991522876735154e-06,
      "loss": 0.2563,
      "step": 12
    },
    {
      "epoch": 0.6753246753246753,
      "grad_norm": 0.17355136573314667,
      "learning_rate": 2.265259209387867e-06,
      "loss": 0.2928,
      "step": 13
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.1705784648656845,
      "learning_rate": 1.6135921418712959e-06,
      "loss": 0.3275,
      "step": 14
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 0.16323448717594147,
      "learning_rate": 1.0542974530180327e-06,
      "loss": 0.2644,
      "step": 15
    },
    {
      "epoch": 0.8311688311688312,
      "grad_norm": 0.17525160312652588,
      "learning_rate": 6.026312439675553e-07,
      "loss": 0.2977,
      "step": 16
    },
    {
      "epoch": 0.8831168831168831,
      "grad_norm": 0.1585198938846588,
      "learning_rate": 2.7091379149682683e-07,
      "loss": 0.2762,
      "step": 17
    },
    {
      "epoch": 0.935064935064935,
      "grad_norm": 0.16804182529449463,
      "learning_rate": 6.819348298638839e-08,
      "loss": 0.3112,
      "step": 18
    },
    {
      "epoch": 0.987012987012987,
      "grad_norm": 0.1660885363817215,
      "learning_rate": 0.0,
      "loss": 0.3045,
      "step": 19
    }
  ],
  "logging_steps": 1,
  "max_steps": 19,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8521915203190784e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}