{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1228803145736053,
  "eval_steps": 500,
  "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008192020971573687,
      "grad_norm": 0.2447698563337326,
      "learning_rate": 0.0001494,
      "loss": 2.4052,
      "step": 500
    },
    {
      "epoch": 0.016384041943147375,
      "grad_norm": 0.29972031712532043,
      "learning_rate": 0.00029939999999999996,
      "loss": 1.8314,
      "step": 1000
    },
    {
      "epoch": 0.024576062914721062,
      "grad_norm": 0.22787177562713623,
      "learning_rate": 0.0004494,
      "loss": 1.6945,
      "step": 1500
    },
    {
      "epoch": 0.03276808388629475,
      "grad_norm": 0.21664758026599884,
      "learning_rate": 0.0005993999999999999,
      "loss": 1.5686,
      "step": 2000
    },
    {
      "epoch": 0.04096010485786843,
      "grad_norm": 0.177582785487175,
      "learning_rate": 0.0005998946571323422,
      "loss": 1.4698,
      "step": 2500
    },
    {
      "epoch": 0.049152125829442124,
      "grad_norm": 0.21073713898658752,
      "learning_rate": 0.0005995770092517366,
      "loss": 1.41,
      "step": 3000
    },
    {
      "epoch": 0.05734414680101581,
      "grad_norm": 0.17612150311470032,
      "learning_rate": 0.0005990472795339199,
      "loss": 1.3714,
      "step": 3500
    },
    {
      "epoch": 0.0655361677725895,
      "grad_norm": 0.17588965594768524,
      "learning_rate": 0.0005983058429944914,
      "loss": 1.3247,
      "step": 4000
    },
    {
      "epoch": 0.07372818874416319,
      "grad_norm": 0.17322216928005219,
      "learning_rate": 0.0005973532245242761,
      "loss": 1.2806,
      "step": 4500
    },
    {
      "epoch": 0.08192020971573687,
      "grad_norm": 0.1861853152513504,
      "learning_rate": 0.0005961900985177338,
      "loss": 1.279,
      "step": 5000
    },
    {
      "epoch": 0.09011223068731056,
      "grad_norm": 0.14911051094532013,
      "learning_rate": 0.0005948172883955301,
      "loss": 1.2516,
      "step": 5500
    },
    {
      "epoch": 0.09830425165888425,
      "grad_norm": 0.18009094893932343,
      "learning_rate": 0.000593235766021606,
      "loss": 1.2326,
      "step": 6000
    },
    {
      "epoch": 0.10649627263045794,
      "grad_norm": 0.15231232345104218,
      "learning_rate": 0.000591446651015159,
      "loss": 1.2247,
      "step": 6500
    },
    {
      "epoch": 0.11468829360203162,
      "grad_norm": 0.14419229328632355,
      "learning_rate": 0.0005894512099580222,
      "loss": 1.2203,
      "step": 7000
    },
    {
      "epoch": 0.1228803145736053,
      "grad_norm": 0.1426558941602707,
      "learning_rate": 0.0005872508554980035,
      "loss": 1.2077,
      "step": 7500
    }
  ],
  "logging_steps": 500,
  "max_steps": 61035,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2542017536e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}