Training in progress, step 10, checkpoint (commit 0e466f0, verified)
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0009290658243136526,
  "eval_steps": 3,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 9.290658243136526e-05,
      "grad_norm": 42.913795471191406,
      "learning_rate": 2e-05,
      "loss": 13.8547,
      "step": 1
    },
    {
      "epoch": 0.00018581316486273051,
      "grad_norm": 37.62971878051758,
      "learning_rate": 4e-05,
      "loss": 13.9481,
      "step": 2
    },
    {
      "epoch": 0.00027871974729409577,
      "grad_norm": 43.87750244140625,
      "learning_rate": 6e-05,
      "loss": 13.5865,
      "step": 3
    },
    {
      "epoch": 0.00027871974729409577,
      "eval_loss": 3.4376113414764404,
      "eval_runtime": 118.0395,
      "eval_samples_per_second": 38.394,
      "eval_steps_per_second": 19.197,
      "step": 3
    },
    {
      "epoch": 0.00037162632972546103,
      "grad_norm": 50.38986587524414,
      "learning_rate": 8e-05,
      "loss": 13.6738,
      "step": 4
    },
    {
      "epoch": 0.0004645329121568263,
      "grad_norm": 51.37370681762695,
      "learning_rate": 0.0001,
      "loss": 15.8409,
      "step": 5
    },
    {
      "epoch": 0.0005574394945881915,
      "grad_norm": 65.8694839477539,
      "learning_rate": 0.00012,
      "loss": 13.3023,
      "step": 6
    },
    {
      "epoch": 0.0005574394945881915,
      "eval_loss": 3.433248519897461,
      "eval_runtime": 118.5003,
      "eval_samples_per_second": 38.245,
      "eval_steps_per_second": 19.122,
      "step": 6
    },
    {
      "epoch": 0.0006503460770195569,
      "grad_norm": 44.13718032836914,
      "learning_rate": 0.00014,
      "loss": 13.328,
      "step": 7
    },
    {
      "epoch": 0.0007432526594509221,
      "grad_norm": 52.688873291015625,
      "learning_rate": 0.00016,
      "loss": 13.0226,
      "step": 8
    },
    {
      "epoch": 0.0008361592418822874,
      "grad_norm": 60.45896911621094,
      "learning_rate": 0.00018,
      "loss": 13.8277,
      "step": 9
    },
    {
      "epoch": 0.0008361592418822874,
      "eval_loss": 3.415884494781494,
      "eval_runtime": 116.4689,
      "eval_samples_per_second": 38.912,
      "eval_steps_per_second": 19.456,
      "step": 9
    },
    {
      "epoch": 0.0009290658243136526,
      "grad_norm": 53.029144287109375,
      "learning_rate": 0.0002,
      "loss": 14.5572,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 92114994069504.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
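
The file above appears to be the trainer_state.json that the Hugging Face transformers Trainer writes into each checkpoint folder (here at global step 10): log_history interleaves per-step training records (loss, grad_norm, learning_rate) with evaluation records (eval_loss and throughput) taken every eval_steps=3 steps. Below is a minimal inspection sketch in Python; it assumes only a local copy of the file at the illustrative path checkpoint-10/trainer_state.json and uses nothing beyond the standard library.

import json

# Path is illustrative: point it at wherever this checkpoint was downloaded.
with open("checkpoint-10/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.6f}")

# log_history mixes training records (keyed by "loss") and eval records (keyed by "eval_loss").
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_log:
    print(f"step {e['step']:>2}: loss={e['loss']:.4f}  lr={e['learning_rate']:.0e}  grad_norm={e['grad_norm']:.2f}")

for e in eval_log:
    print(f"step {e['step']:>2}: eval_loss={e['eval_loss']:.4f}  ({e['eval_samples_per_second']:.1f} samples/s)")

Read this way, the run is easy to summarize: over the 10 steps the training loss stays roughly between 13 and 16 while the learning rate ramps linearly from 2e-05 to 2e-04, and eval_loss drifts from 3.4376 at step 3 to 3.4159 at step 9.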