leixa committed on
Commit 4e5e453 · verified · 1 Parent(s): 629eb58

Training in progress, step 1500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d6b51cc3bc62b8190fc18b578d89b8a61b859ba3b6b1b7b9ac63fe594b8124b
+oid sha256:169e89b9cbb961dfd9f07b43711bba3573a2bf03f61255023fc0e62008c8708f
 size 25192592
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93f438f2604fdceaa16d93ca245522dd9a43f19b74b0e189fcf464959a4a8d46
+oid sha256:30384a35942eb32b0b6b7c8078c8d783147ff874a45074c7d124f6998f5e97ac
 size 13005370
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f6558da346ac45529467675e437cb6dae4b9aaab0fcbe184bbb74c2ab6cbd9d
+oid sha256:4d3a73943c34a73bf5f52472f78c27f6da00349623f69eea36c35a9ed670e41e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:baec16f1d3203641053b140012c961fd8b8e1dac231c353313afb13a49c1d1fe
+oid sha256:d9d4e004ca157e9e2e51f06820328f72b00d1e93f5fe5770010a13d46b658813
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 2.546696424484253,
   "best_model_checkpoint": "miner_id_24/checkpoint-1350",
-  "epoch": 2.33160621761658,
+  "epoch": 2.5906735751295336,
   "eval_steps": 150,
-  "global_step": 1350,
+  "global_step": 1500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1032,6 +1032,119 @@
       "eval_samples_per_second": 139.461,
       "eval_steps_per_second": 34.865,
       "step": 1350
+    },
+    {
+      "epoch": 2.3488773747841103,
+      "grad_norm": 146.88360595703125,
+      "learning_rate": 0.0001,
+      "loss": 10.1538,
+      "step": 1360
+    },
+    {
+      "epoch": 2.366148531951641,
+      "grad_norm": 186.4013671875,
+      "learning_rate": 0.0001,
+      "loss": 10.0984,
+      "step": 1370
+    },
+    {
+      "epoch": 2.383419689119171,
+      "grad_norm": 78.0069351196289,
+      "learning_rate": 0.0001,
+      "loss": 10.1345,
+      "step": 1380
+    },
+    {
+      "epoch": 2.400690846286701,
+      "grad_norm": 61.604061126708984,
+      "learning_rate": 0.0001,
+      "loss": 10.3753,
+      "step": 1390
+    },
+    {
+      "epoch": 2.4179620034542313,
+      "grad_norm": 76.18042755126953,
+      "learning_rate": 0.0001,
+      "loss": 10.3891,
+      "step": 1400
+    },
+    {
+      "epoch": 2.4352331606217614,
+      "grad_norm": 108.20265197753906,
+      "learning_rate": 0.0001,
+      "loss": 10.0772,
+      "step": 1410
+    },
+    {
+      "epoch": 2.452504317789292,
+      "grad_norm": 76.92615509033203,
+      "learning_rate": 0.0001,
+      "loss": 9.8017,
+      "step": 1420
+    },
+    {
+      "epoch": 2.469775474956822,
+      "grad_norm": 138.38558959960938,
+      "learning_rate": 0.0001,
+      "loss": 9.6187,
+      "step": 1430
+    },
+    {
+      "epoch": 2.4870466321243523,
+      "grad_norm": 171.43023681640625,
+      "learning_rate": 0.0001,
+      "loss": 9.9722,
+      "step": 1440
+    },
+    {
+      "epoch": 2.5043177892918824,
+      "grad_norm": 136.78587341308594,
+      "learning_rate": 0.0001,
+      "loss": 11.1977,
+      "step": 1450
+    },
+    {
+      "epoch": 2.5215889464594126,
+      "grad_norm": 144.79205322265625,
+      "learning_rate": 0.0001,
+      "loss": 10.4862,
+      "step": 1460
+    },
+    {
+      "epoch": 2.538860103626943,
+      "grad_norm": 127.37538146972656,
+      "learning_rate": 0.0001,
+      "loss": 10.8806,
+      "step": 1470
+    },
+    {
+      "epoch": 2.5561312607944733,
+      "grad_norm": 122.02765655517578,
+      "learning_rate": 0.0001,
+      "loss": 10.4921,
+      "step": 1480
+    },
+    {
+      "epoch": 2.5734024179620034,
+      "grad_norm": 653.7969970703125,
+      "learning_rate": 0.0001,
+      "loss": 10.2225,
+      "step": 1490
+    },
+    {
+      "epoch": 2.5906735751295336,
+      "grad_norm": 125.18404388427734,
+      "learning_rate": 0.0001,
+      "loss": 10.6133,
+      "step": 1500
+    },
+    {
+      "epoch": 2.5906735751295336,
+      "eval_loss": 2.5841307640075684,
+      "eval_runtime": 3.5077,
+      "eval_samples_per_second": 139.122,
+      "eval_steps_per_second": 34.78,
+      "step": 1500
     }
   ],
   "logging_steps": 10,
@@ -1046,7 +1159,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
       }
     },
     "TrainerControl": {
@@ -1055,12 +1168,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2.389102308832051e+16,
+  "total_flos": 2.654607288513331e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null