Qwen2.5-1.5B-Open-R1-Distill / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9948186528497409,
"eval_steps": 100,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029607698001480384,
"grad_norm": 2.3427891450842355,
"learning_rate": 5.882352941176471e-06,
"loss": 1.0993,
"step": 5
},
{
"epoch": 0.05921539600296077,
"grad_norm": 1.638241993747605,
"learning_rate": 1.1764705882352942e-05,
"loss": 1.0402,
"step": 10
},
{
"epoch": 0.08882309400444116,
"grad_norm": 0.8970481588482149,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.9533,
"step": 15
},
{
"epoch": 0.11843079200592153,
"grad_norm": 0.6126224631372397,
"learning_rate": 1.9980527694749952e-05,
"loss": 0.8922,
"step": 20
},
{
"epoch": 0.14803849000740193,
"grad_norm": 0.5491637741037821,
"learning_rate": 1.986180478852149e-05,
"loss": 0.8551,
"step": 25
},
{
"epoch": 0.17764618800888232,
"grad_norm": 0.4357332212949219,
"learning_rate": 1.963645895935632e-05,
"loss": 0.8282,
"step": 30
},
{
"epoch": 0.20725388601036268,
"grad_norm": 0.3870290405518921,
"learning_rate": 1.930692657985482e-05,
"loss": 0.8229,
"step": 35
},
{
"epoch": 0.23686158401184307,
"grad_norm": 0.41071978450812635,
"learning_rate": 1.887677045685188e-05,
"loss": 0.8225,
"step": 40
},
{
"epoch": 0.2664692820133235,
"grad_norm": 0.35511835037971484,
"learning_rate": 1.8350641311400813e-05,
"loss": 0.8073,
"step": 45
},
{
"epoch": 0.29607698001480387,
"grad_norm": 0.34562914008715157,
"learning_rate": 1.773422749654988e-05,
"loss": 0.7977,
"step": 50
},
{
"epoch": 0.32568467801628426,
"grad_norm": 0.38429745030506335,
"learning_rate": 1.7034193496547903e-05,
"loss": 0.7877,
"step": 55
},
{
"epoch": 0.35529237601776464,
"grad_norm": 0.38450100811350485,
"learning_rate": 1.6258107872407376e-05,
"loss": 0.7982,
"step": 60
},
{
"epoch": 0.38490007401924503,
"grad_norm": 0.3508167601846127,
"learning_rate": 1.5414361432856475e-05,
"loss": 0.7722,
"step": 65
},
{
"epoch": 0.41450777202072536,
"grad_norm": 0.354273236485806,
"learning_rate": 1.4512076515391375e-05,
"loss": 0.7741,
"step": 70
},
{
"epoch": 0.44411547002220575,
"grad_norm": 0.3435732479373877,
"learning_rate": 1.356100835825547e-05,
"loss": 0.7852,
"step": 75
},
{
"epoch": 0.47372316802368614,
"grad_norm": 0.3554552572459275,
"learning_rate": 1.257143962968246e-05,
"loss": 0.7667,
"step": 80
},
{
"epoch": 0.5033308660251665,
"grad_norm": 0.37807154489345807,
"learning_rate": 1.155406925472205e-05,
"loss": 0.7707,
"step": 85
},
{
"epoch": 0.532938564026647,
"grad_norm": 0.3667445569072502,
"learning_rate": 1.0519896741619803e-05,
"loss": 0.7701,
"step": 90
},
{
"epoch": 0.5625462620281273,
"grad_norm": 0.3813236840767762,
"learning_rate": 9.480103258380198e-06,
"loss": 0.7772,
"step": 95
},
{
"epoch": 0.5921539600296077,
"grad_norm": 0.34559010442801097,
"learning_rate": 8.445930745277953e-06,
"loss": 0.7531,
"step": 100
},
{
"epoch": 0.5921539600296077,
"eval_loss": 0.7882063388824463,
"eval_runtime": 1.6572,
"eval_samples_per_second": 77.237,
"eval_steps_per_second": 2.414,
"step": 100
},
{
"epoch": 0.6217616580310881,
"grad_norm": 0.341364069372392,
"learning_rate": 7.428560370317542e-06,
"loss": 0.7571,
"step": 105
},
{
"epoch": 0.6513693560325685,
"grad_norm": 0.3559969343392935,
"learning_rate": 6.438991641744531e-06,
"loss": 0.7682,
"step": 110
},
{
"epoch": 0.6809770540340488,
"grad_norm": 0.3161514819141605,
"learning_rate": 5.487923484608629e-06,
"loss": 0.7625,
"step": 115
},
{
"epoch": 0.7105847520355293,
"grad_norm": 0.30339451601959244,
"learning_rate": 4.5856385671435285e-06,
"loss": 0.7515,
"step": 120
},
{
"epoch": 0.7401924500370096,
"grad_norm": 0.35261640845743664,
"learning_rate": 3.7418921275926245e-06,
"loss": 0.7539,
"step": 125
},
{
"epoch": 0.7698001480384901,
"grad_norm": 0.3193239804674359,
"learning_rate": 2.965806503452098e-06,
"loss": 0.7556,
"step": 130
},
{
"epoch": 0.7994078460399704,
"grad_norm": 0.32912058451560244,
"learning_rate": 2.265772503450122e-06,
"loss": 0.7586,
"step": 135
},
{
"epoch": 0.8290155440414507,
"grad_norm": 0.303685802235117,
"learning_rate": 1.6493586885991908e-06,
"loss": 0.75,
"step": 140
},
{
"epoch": 0.8586232420429312,
"grad_norm": 0.3173990670917867,
"learning_rate": 1.1232295431481222e-06,
"loss": 0.7568,
"step": 145
},
{
"epoch": 0.8882309400444115,
"grad_norm": 0.3010954114203951,
"learning_rate": 6.930734201451817e-07,
"loss": 0.7668,
"step": 150
},
{
"epoch": 0.9178386380458919,
"grad_norm": 0.30232823482832394,
"learning_rate": 3.635410406436857e-07,
"loss": 0.7616,
"step": 155
},
{
"epoch": 0.9474463360473723,
"grad_norm": 0.28964694770453414,
"learning_rate": 1.3819521147851122e-07,
"loss": 0.7609,
"step": 160
},
{
"epoch": 0.9770540340488527,
"grad_norm": 0.28146626492833826,
"learning_rate": 1.947230525005006e-08,
"loss": 0.7448,
"step": 165
},
{
"epoch": 0.9948186528497409,
"step": 168,
"total_flos": 76517996494848.0,
"train_loss": 0.8023917121546609,
"train_runtime": 1111.3885,
"train_samples_per_second": 19.444,
"train_steps_per_second": 0.151
}
],
"logging_steps": 5,
"max_steps": 168,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 76517996494848.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
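
The log history recorded above can be inspected programmatically. Below is a minimal sketch, assuming the JSON content has been saved locally as trainer_state.json; it uses only the Python standard library and simply separates the per-step training logs from the evaluation entries and the final summary.

import json

# Assumption: the JSON above is stored in a local file named "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "loss"; evaluation entries carry "eval_loss";
# the final summary entry carries "train_loss" and is excluded from both lists.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for entry in train_logs:
    print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
          f"lr {entry['learning_rate']:.2e}")

for entry in eval_logs:
    print(f"eval @ step {entry['step']}: eval_loss {entry['eval_loss']:.4f}")

print(f"total steps: {state['global_step']}, "
      f"train batch size: {state['train_batch_size']}")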