hosseinbv committed
Commit 1b36299 · verified · 1 Parent(s): 23a4f5f

Uploading /ephemeral/hossein/output/newData-progressive-yoco-tiny-llama-CDL-19

README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: TinyLlama/TinyLlama_v1.1
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: newData-progressive-yoco-tiny-llama-CDL-19
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # newData-progressive-yoco-tiny-llama-CDL-19
+
+ This model is a fine-tuned version of the local checkpoint `/ephemeral/hossein/output/newData-progressive-yoco-tiny-llama-CDL-20/checkpoint-50` (the auto-generated Hub link pointed at this local path and does not resolve), trained on the alpaca_reformatted, UltraInteract_sft_reformatted, reformatted_ultrachat_200k, reformatted_MathInstruct and small_slim_pajama datasets.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 58
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 1856
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.005
+ - training_steps: 50
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
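
The effective batch size in the card is just the product of the per-device batch size, the gradient-accumulation steps, and the device count, and together with the step count it fixes how much data the run saw. A minimal sanity check of that arithmetic, using only numbers reported in this card and in train_results.json below:

```python
# Sanity-check the reported hyperparameters (all values copied from this commit).
train_batch_size = 58            # per-device train batch size
gradient_accumulation_steps = 4
num_devices = 8
training_steps = 50

total_train_batch_size = train_batch_size * gradient_accumulation_steps * num_devices
assert total_train_batch_size == 1856          # matches the card

samples_seen = total_train_batch_size * training_steps   # 92,800 samples
train_runtime = 1596.1705                      # seconds, from train_results.json
epoch = 0.05959475566150179                    # from train_results.json

assert abs(samples_seen / train_runtime - 58.139) < 1e-2  # reported samples/sec
print(f"implied mixed-dataset size: {samples_seen / epoch:,.0f} samples")  # ~1.56M
```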
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 0.05959475566150179,
+ "total_flos": 106890730143744.0,
+ "train_loss": 1.9979763078689574,
+ "train_runtime": 1596.1705,
+ "train_samples_per_second": 58.139,
+ "train_steps_per_second": 0.031
+ }
config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "TinyLlama/TinyLlama_v1.1", "architectures": ["ProgressiveYocoLlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "crossDecoder_start_idx": 3, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 5632, "max_position_embeddings": 2048, "mlp_bias": false, "model_type": "progressive_yoco_llama", "num_attention_heads": 32, "num_hidden_layers": 22, "num_key_value_heads": 4, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "transformers_version": "4.45.2", "use_cache": false, "vocab_size": 32000}
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "max_length": 2048,
+ "pad_token_id": 0,
+ "transformers_version": "4.45.2"
+ }
runs/Nov29_09-08-59_creative-turing-2/events.out.tfevents.1732871548.creative-turing-2.2669372.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5c46e5f28bdd370e40f8445efe2502de326569fc588f9d95e22af03b591b549
+ size 16158
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
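Note that `pad_token` reuses the EOS token `</s>` (the base Llama tokenizer has no dedicated pad token), while generation_config.json above sets `pad_token_id` to 0, the `<unk>` id. A short sketch of what the tokenizer-side choice means in practice, under the same assumed repo id as above:

```python
# Sketch, assuming the tokenizer loads from the (assumed) repo id used earlier.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hosseinbv/newData-progressive-yoco-tiny-llama-CDL-19")
assert tok.pad_token == tok.eos_token == "</s>"   # padding reuses EOS (id 2)

# With padding_side "right" (see tokenizer_config.json below), shorter rows are
# filled with the EOS id and masked out via attention_mask.
batch = tok(["hi", "a somewhat longer input"], padding=True, return_tensors="pt")
print(batch["input_ids"])
print(batch["attention_mask"])
```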
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ content }}{% elif message['role'] == 'assistant' %}{{ content }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
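The `chat_template` above is unusually bare: it emits the system message (if any) followed by every user/assistant message's content, with no role tags or separators (plausibly a plain LLaMA-Factory template, given the llama-factory tag). A minimal sketch of the rendered output, again under the assumed repo id:

```python
# Sketch of what this chat_template renders, assuming the repo id used earlier.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hosseinbv/newData-progressive-yoco-tiny-llama-CDL-19")
messages = [
    {"role": "system", "content": "Be concise. "},
    {"role": "user", "content": "What is 2+2? "},
    {"role": "assistant", "content": "4."},
]
text = tok.apply_chat_template(messages, tokenize=False)
assert text == "Be concise. What is 2+2? 4."  # pure concatenation, nothing added
```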
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 0.05959475566150179,
+ "total_flos": 106890730143744.0,
+ "train_loss": 1.9979763078689574,
+ "train_runtime": 1596.1705,
+ "train_samples_per_second": 58.139,
+ "train_steps_per_second": 0.031
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,51 @@
+ {"current_steps": 1, "total_steps": 50, "loss": 2.1072, "lr": 2e-05, "epoch": 0.0011918951132300357, "percentage": 2.0, "elapsed_time": "0:00:34", "remaining_time": "0:28:32"}
+ {"current_steps": 2, "total_steps": 50, "loss": 2.101, "lr": 1.9979453927503366e-05, "epoch": 0.0023837902264600714, "percentage": 4.0, "elapsed_time": "0:01:06", "remaining_time": "0:26:30"}
+ {"current_steps": 3, "total_steps": 50, "loss": 2.0284, "lr": 1.991790013823246e-05, "epoch": 0.003575685339690107, "percentage": 6.0, "elapsed_time": "0:01:37", "remaining_time": "0:25:32"}
+ {"current_steps": 4, "total_steps": 50, "loss": 2.0343, "lr": 1.9815591569910654e-05, "epoch": 0.004767580452920143, "percentage": 8.0, "elapsed_time": "0:02:09", "remaining_time": "0:24:49"}
+ {"current_steps": 5, "total_steps": 50, "loss": 2.022, "lr": 1.9672948630390296e-05, "epoch": 0.0059594755661501785, "percentage": 10.0, "elapsed_time": "0:02:41", "remaining_time": "0:24:10"}
+ {"current_steps": 6, "total_steps": 50, "loss": 2.0615, "lr": 1.949055747010669e-05, "epoch": 0.007151370679380214, "percentage": 12.0, "elapsed_time": "0:03:12", "remaining_time": "0:23:34"}
+ {"current_steps": 7, "total_steps": 50, "loss": 2.0193, "lr": 1.926916757346022e-05, "epoch": 0.00834326579261025, "percentage": 14.0, "elapsed_time": "0:03:44", "remaining_time": "0:22:59"}
+ {"current_steps": 8, "total_steps": 50, "loss": 1.9835, "lr": 1.900968867902419e-05, "epoch": 0.009535160905840286, "percentage": 16.0, "elapsed_time": "0:04:16", "remaining_time": "0:22:25"}
+ {"current_steps": 9, "total_steps": 50, "loss": 2.0048, "lr": 1.8713187041233896e-05, "epoch": 0.010727056019070322, "percentage": 18.0, "elapsed_time": "0:04:47", "remaining_time": "0:21:51"}
+ {"current_steps": 10, "total_steps": 50, "loss": 1.9825, "lr": 1.8380881048918406e-05, "epoch": 0.011918951132300357, "percentage": 20.0, "elapsed_time": "0:05:19", "remaining_time": "0:21:18"}
+ {"current_steps": 11, "total_steps": 50, "loss": 1.9862, "lr": 1.8014136218679566e-05, "epoch": 0.013110846245530394, "percentage": 22.0, "elapsed_time": "0:05:51", "remaining_time": "0:20:45"}
+ {"current_steps": 12, "total_steps": 50, "loss": 1.9776, "lr": 1.7614459583691346e-05, "epoch": 0.014302741358760428, "percentage": 24.0, "elapsed_time": "0:06:23", "remaining_time": "0:20:13"}
+ {"current_steps": 13, "total_steps": 50, "loss": 1.9674, "lr": 1.7183493500977277e-05, "epoch": 0.015494636471990465, "percentage": 26.0, "elapsed_time": "0:06:54", "remaining_time": "0:19:40"}
+ {"current_steps": 14, "total_steps": 50, "loss": 1.9734, "lr": 1.672300890261317e-05, "epoch": 0.0166865315852205, "percentage": 28.0, "elapsed_time": "0:07:26", "remaining_time": "0:19:08"}
+ {"current_steps": 15, "total_steps": 50, "loss": 1.9846, "lr": 1.6234898018587336e-05, "epoch": 0.017878426698450536, "percentage": 30.0, "elapsed_time": "0:07:58", "remaining_time": "0:18:36"}
+ {"current_steps": 16, "total_steps": 50, "loss": 1.9647, "lr": 1.5721166601221697e-05, "epoch": 0.01907032181168057, "percentage": 32.0, "elapsed_time": "0:08:30", "remaining_time": "0:18:03"}
+ {"current_steps": 17, "total_steps": 50, "loss": 1.9842, "lr": 1.5183925683105254e-05, "epoch": 0.02026221692491061, "percentage": 34.0, "elapsed_time": "0:09:01", "remaining_time": "0:17:31"}
+ {"current_steps": 18, "total_steps": 50, "loss": 1.9792, "lr": 1.4625382902408356e-05, "epoch": 0.021454112038140644, "percentage": 36.0, "elapsed_time": "0:09:33", "remaining_time": "0:16:59"}
+ {"current_steps": 19, "total_steps": 50, "loss": 1.992, "lr": 1.4047833431223938e-05, "epoch": 0.02264600715137068, "percentage": 38.0, "elapsed_time": "0:10:05", "remaining_time": "0:16:27"}
+ {"current_steps": 20, "total_steps": 50, "loss": 2.0017, "lr": 1.3453650544213078e-05, "epoch": 0.023837902264600714, "percentage": 40.0, "elapsed_time": "0:10:36", "remaining_time": "0:15:55"}
+ {"current_steps": 21, "total_steps": 50, "loss": 1.9654, "lr": 1.2845275866310325e-05, "epoch": 0.025029797377830752, "percentage": 42.0, "elapsed_time": "0:11:08", "remaining_time": "0:15:23"}
+ {"current_steps": 22, "total_steps": 50, "loss": 1.9742, "lr": 1.2225209339563144e-05, "epoch": 0.026221692491060787, "percentage": 44.0, "elapsed_time": "0:11:40", "remaining_time": "0:14:51"}
+ {"current_steps": 23, "total_steps": 50, "loss": 1.9899, "lr": 1.1595998950333794e-05, "epoch": 0.027413587604290822, "percentage": 46.0, "elapsed_time": "0:12:12", "remaining_time": "0:14:19"}
+ {"current_steps": 24, "total_steps": 50, "loss": 1.982, "lr": 1.0960230259076819e-05, "epoch": 0.028605482717520857, "percentage": 48.0, "elapsed_time": "0:12:43", "remaining_time": "0:13:47"}
+ {"current_steps": 25, "total_steps": 50, "loss": 1.989, "lr": 1.0320515775716556e-05, "epoch": 0.029797377830750895, "percentage": 50.0, "elapsed_time": "0:13:15", "remaining_time": "0:13:15"}
+ {"current_steps": 26, "total_steps": 50, "loss": 1.9926, "lr": 9.67948422428345e-06, "epoch": 0.03098927294398093, "percentage": 52.0, "elapsed_time": "0:13:47", "remaining_time": "0:12:43"}
+ {"current_steps": 27, "total_steps": 50, "loss": 1.9977, "lr": 9.039769740923183e-06, "epoch": 0.03218116805721097, "percentage": 54.0, "elapsed_time": "0:14:18", "remaining_time": "0:12:11"}
+ {"current_steps": 28, "total_steps": 50, "loss": 2.0056, "lr": 8.404001049666211e-06, "epoch": 0.033373063170441, "percentage": 56.0, "elapsed_time": "0:14:50", "remaining_time": "0:11:39"}
+ {"current_steps": 29, "total_steps": 50, "loss": 1.9724, "lr": 7.774790660436857e-06, "epoch": 0.03456495828367104, "percentage": 58.0, "elapsed_time": "0:15:22", "remaining_time": "0:11:07"}
+ {"current_steps": 30, "total_steps": 50, "loss": 1.9487, "lr": 7.154724133689677e-06, "epoch": 0.03575685339690107, "percentage": 60.0, "elapsed_time": "0:15:54", "remaining_time": "0:10:36"}
+ {"current_steps": 31, "total_steps": 50, "loss": 1.9871, "lr": 6.546349455786926e-06, "epoch": 0.03694874851013111, "percentage": 62.0, "elapsed_time": "0:16:25", "remaining_time": "0:10:04"}
+ {"current_steps": 32, "total_steps": 50, "loss": 2.0023, "lr": 5.952166568776062e-06, "epoch": 0.03814064362336114, "percentage": 64.0, "elapsed_time": "0:16:57", "remaining_time": "0:09:32"}
+ {"current_steps": 33, "total_steps": 50, "loss": 1.9781, "lr": 5.37461709759165e-06, "epoch": 0.03933253873659118, "percentage": 66.0, "elapsed_time": "0:17:29", "remaining_time": "0:09:00"}
+ {"current_steps": 34, "total_steps": 50, "loss": 2.0167, "lr": 4.81607431689475e-06, "epoch": 0.04052443384982122, "percentage": 68.0, "elapsed_time": "0:18:00", "remaining_time": "0:08:28"}
+ {"current_steps": 35, "total_steps": 50, "loss": 2.0014, "lr": 4.278833398778306e-06, "epoch": 0.041716328963051254, "percentage": 70.0, "elapsed_time": "0:18:32", "remaining_time": "0:07:56"}
+ {"current_steps": 36, "total_steps": 50, "loss": 2.0307, "lr": 3.7651019814126656e-06, "epoch": 0.04290822407628129, "percentage": 72.0, "elapsed_time": "0:19:04", "remaining_time": "0:07:25"}
+ {"current_steps": 37, "total_steps": 50, "loss": 1.9922, "lr": 3.2769910973868314e-06, "epoch": 0.04410011918951132, "percentage": 74.0, "elapsed_time": "0:19:36", "remaining_time": "0:06:53"}
+ {"current_steps": 38, "total_steps": 50, "loss": 2.009, "lr": 2.8165064990227255e-06, "epoch": 0.04529201430274136, "percentage": 76.0, "elapsed_time": "0:20:07", "remaining_time": "0:06:21"}
+ {"current_steps": 39, "total_steps": 50, "loss": 2.0069, "lr": 2.3855404163086558e-06, "epoch": 0.04648390941597139, "percentage": 78.0, "elapsed_time": "0:20:39", "remaining_time": "0:05:49"}
+ {"current_steps": 40, "total_steps": 50, "loss": 1.9649, "lr": 1.9858637813204352e-06, "epoch": 0.04767580452920143, "percentage": 80.0, "elapsed_time": "0:21:11", "remaining_time": "0:05:17"}
+ {"current_steps": 41, "total_steps": 50, "loss": 1.988, "lr": 1.6191189510815942e-06, "epoch": 0.04886769964243146, "percentage": 82.0, "elapsed_time": "0:21:42", "remaining_time": "0:04:46"}
+ {"current_steps": 42, "total_steps": 50, "loss": 1.9493, "lr": 1.286812958766106e-06, "epoch": 0.050059594755661505, "percentage": 84.0, "elapsed_time": "0:22:14", "remaining_time": "0:04:14"}
+ {"current_steps": 43, "total_steps": 50, "loss": 1.9827, "lr": 9.903113209758098e-07, "epoch": 0.05125148986889154, "percentage": 86.0, "elapsed_time": "0:22:46", "remaining_time": "0:03:42"}
+ {"current_steps": 44, "total_steps": 50, "loss": 2.0005, "lr": 7.308324265397837e-07, "epoch": 0.052443384982121574, "percentage": 88.0, "elapsed_time": "0:23:18", "remaining_time": "0:03:10"}
+ {"current_steps": 45, "total_steps": 50, "loss": 1.9962, "lr": 5.094425298933136e-07, "epoch": 0.05363528009535161, "percentage": 90.0, "elapsed_time": "0:23:49", "remaining_time": "0:02:38"}
+ {"current_steps": 46, "total_steps": 50, "loss": 2.0125, "lr": 3.2705136960970554e-07, "epoch": 0.054827175208581644, "percentage": 92.0, "elapsed_time": "0:24:21", "remaining_time": "0:02:07"}
+ {"current_steps": 47, "total_steps": 50, "loss": 1.9917, "lr": 1.844084300893456e-07, "epoch": 0.05601907032181168, "percentage": 94.0, "elapsed_time": "0:24:53", "remaining_time": "0:01:35"}
+ {"current_steps": 48, "total_steps": 50, "loss": 1.9941, "lr": 8.209986176753947e-08, "epoch": 0.057210965435041714, "percentage": 96.0, "elapsed_time": "0:25:25", "remaining_time": "0:01:03"}
+ {"current_steps": 49, "total_steps": 50, "loss": 1.9912, "lr": 2.054607249663665e-08, "epoch": 0.058402860548271755, "percentage": 98.0, "elapsed_time": "0:25:56", "remaining_time": "0:00:31"}
+ {"current_steps": 50, "total_steps": 50, "loss": 2.0301, "lr": 0.0, "epoch": 0.05959475566150179, "percentage": 100.0, "elapsed_time": "0:26:28", "remaining_time": "0:00:00"}
+ {"current_steps": 50, "total_steps": 50, "epoch": 0.05959475566150179, "percentage": 100.0, "elapsed_time": "0:26:36", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,392 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.05959475566150179,
+ "eval_steps": 50,
+ "global_step": 50,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0011918951132300357,
+ "grad_norm": 2.9031435226589246,
+ "learning_rate": 2e-05,
+ "loss": 2.1072,
+ "step": 1
+ },
+ {
+ "epoch": 0.0023837902264600714,
+ "grad_norm": 2.7150248182983923,
+ "learning_rate": 1.9979453927503366e-05,
+ "loss": 2.101,
+ "step": 2
+ },
+ {
+ "epoch": 0.003575685339690107,
+ "grad_norm": 1.6481585515596062,
+ "learning_rate": 1.991790013823246e-05,
+ "loss": 2.0284,
+ "step": 3
+ },
+ {
+ "epoch": 0.004767580452920143,
+ "grad_norm": 2.290158944264602,
+ "learning_rate": 1.9815591569910654e-05,
+ "loss": 2.0343,
+ "step": 4
+ },
+ {
+ "epoch": 0.0059594755661501785,
+ "grad_norm": 1.620010410362664,
+ "learning_rate": 1.9672948630390296e-05,
+ "loss": 2.022,
+ "step": 5
+ },
+ {
+ "epoch": 0.007151370679380214,
+ "grad_norm": 1.2963689571694739,
+ "learning_rate": 1.949055747010669e-05,
+ "loss": 2.0615,
+ "step": 6
+ },
+ {
+ "epoch": 0.00834326579261025,
+ "grad_norm": 1.1986327536123562,
+ "learning_rate": 1.926916757346022e-05,
+ "loss": 2.0193,
+ "step": 7
+ },
+ {
+ "epoch": 0.009535160905840286,
+ "grad_norm": 0.7287087756055839,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 1.9835,
+ "step": 8
+ },
+ {
+ "epoch": 0.010727056019070322,
+ "grad_norm": 0.7584838316893223,
+ "learning_rate": 1.8713187041233896e-05,
+ "loss": 2.0048,
+ "step": 9
+ },
+ {
+ "epoch": 0.011918951132300357,
+ "grad_norm": 0.6198276917269713,
+ "learning_rate": 1.8380881048918406e-05,
+ "loss": 1.9825,
+ "step": 10
+ },
+ {
+ "epoch": 0.013110846245530394,
+ "grad_norm": 0.46373328707102957,
+ "learning_rate": 1.8014136218679566e-05,
+ "loss": 1.9862,
+ "step": 11
+ },
+ {
+ "epoch": 0.014302741358760428,
+ "grad_norm": 0.5577755717812845,
+ "learning_rate": 1.7614459583691346e-05,
+ "loss": 1.9776,
+ "step": 12
+ },
+ {
+ "epoch": 0.015494636471990465,
+ "grad_norm": 0.5939670606164611,
+ "learning_rate": 1.7183493500977277e-05,
+ "loss": 1.9674,
+ "step": 13
+ },
+ {
+ "epoch": 0.0166865315852205,
+ "grad_norm": 0.4628705732184792,
+ "learning_rate": 1.672300890261317e-05,
+ "loss": 1.9734,
+ "step": 14
+ },
+ {
+ "epoch": 0.017878426698450536,
+ "grad_norm": 0.485766773190629,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 1.9846,
+ "step": 15
+ },
+ {
+ "epoch": 0.01907032181168057,
+ "grad_norm": 0.5223550727317597,
+ "learning_rate": 1.5721166601221697e-05,
+ "loss": 1.9647,
+ "step": 16
+ },
+ {
+ "epoch": 0.02026221692491061,
+ "grad_norm": 0.4433368830898489,
+ "learning_rate": 1.5183925683105254e-05,
+ "loss": 1.9842,
+ "step": 17
+ },
+ {
+ "epoch": 0.021454112038140644,
+ "grad_norm": 0.3532051409411447,
+ "learning_rate": 1.4625382902408356e-05,
+ "loss": 1.9792,
+ "step": 18
+ },
+ {
+ "epoch": 0.02264600715137068,
+ "grad_norm": 0.3750713272673884,
+ "learning_rate": 1.4047833431223938e-05,
+ "loss": 1.992,
+ "step": 19
+ },
+ {
+ "epoch": 0.023837902264600714,
+ "grad_norm": 0.3478672288559764,
+ "learning_rate": 1.3453650544213078e-05,
+ "loss": 2.0017,
+ "step": 20
+ },
+ {
+ "epoch": 0.025029797377830752,
+ "grad_norm": 0.3124495293347313,
+ "learning_rate": 1.2845275866310325e-05,
+ "loss": 1.9654,
+ "step": 21
+ },
+ {
+ "epoch": 0.026221692491060787,
+ "grad_norm": 0.30827848535728997,
+ "learning_rate": 1.2225209339563144e-05,
+ "loss": 1.9742,
+ "step": 22
+ },
+ {
+ "epoch": 0.027413587604290822,
+ "grad_norm": 0.3531950759587543,
+ "learning_rate": 1.1595998950333794e-05,
+ "loss": 1.9899,
+ "step": 23
+ },
+ {
+ "epoch": 0.028605482717520857,
+ "grad_norm": 0.3222872371524797,
+ "learning_rate": 1.0960230259076819e-05,
+ "loss": 1.982,
+ "step": 24
+ },
+ {
+ "epoch": 0.029797377830750895,
+ "grad_norm": 0.28066157317874146,
+ "learning_rate": 1.0320515775716556e-05,
+ "loss": 1.989,
+ "step": 25
+ },
+ {
+ "epoch": 0.03098927294398093,
+ "grad_norm": 0.28664391299284936,
+ "learning_rate": 9.67948422428345e-06,
+ "loss": 1.9926,
+ "step": 26
+ },
+ {
+ "epoch": 0.03218116805721097,
+ "grad_norm": 0.32690441539002585,
+ "learning_rate": 9.039769740923183e-06,
+ "loss": 1.9977,
+ "step": 27
+ },
+ {
+ "epoch": 0.033373063170441,
+ "grad_norm": 0.26400158066485074,
+ "learning_rate": 8.404001049666211e-06,
+ "loss": 2.0056,
+ "step": 28
+ },
+ {
+ "epoch": 0.03456495828367104,
+ "grad_norm": 0.23533776552109373,
+ "learning_rate": 7.774790660436857e-06,
+ "loss": 1.9724,
+ "step": 29
+ },
+ {
+ "epoch": 0.03575685339690107,
+ "grad_norm": 0.22178984088070275,
+ "learning_rate": 7.154724133689677e-06,
+ "loss": 1.9487,
+ "step": 30
+ },
+ {
+ "epoch": 0.03694874851013111,
+ "grad_norm": 0.23840728595761607,
+ "learning_rate": 6.546349455786926e-06,
+ "loss": 1.9871,
+ "step": 31
+ },
+ {
+ "epoch": 0.03814064362336114,
+ "grad_norm": 0.2270923288108359,
+ "learning_rate": 5.952166568776062e-06,
+ "loss": 2.0023,
+ "step": 32
+ },
+ {
+ "epoch": 0.03933253873659118,
+ "grad_norm": 0.21128333879501035,
+ "learning_rate": 5.37461709759165e-06,
+ "loss": 1.9781,
+ "step": 33
+ },
+ {
+ "epoch": 0.04052443384982122,
+ "grad_norm": 0.20025698249162766,
+ "learning_rate": 4.81607431689475e-06,
+ "loss": 2.0167,
+ "step": 34
+ },
+ {
+ "epoch": 0.041716328963051254,
+ "grad_norm": 0.19545125950438635,
+ "learning_rate": 4.278833398778306e-06,
+ "loss": 2.0014,
+ "step": 35
+ },
+ {
+ "epoch": 0.04290822407628129,
+ "grad_norm": 0.21003372321244757,
+ "learning_rate": 3.7651019814126656e-06,
+ "loss": 2.0307,
+ "step": 36
+ },
+ {
+ "epoch": 0.04410011918951132,
+ "grad_norm": 0.20875875072814898,
+ "learning_rate": 3.2769910973868314e-06,
+ "loss": 1.9922,
+ "step": 37
+ },
+ {
+ "epoch": 0.04529201430274136,
+ "grad_norm": 0.20086863185331044,
+ "learning_rate": 2.8165064990227255e-06,
+ "loss": 2.009,
+ "step": 38
+ },
+ {
+ "epoch": 0.04648390941597139,
+ "grad_norm": 0.19637651354676247,
+ "learning_rate": 2.3855404163086558e-06,
+ "loss": 2.0069,
+ "step": 39
+ },
+ {
+ "epoch": 0.04767580452920143,
+ "grad_norm": 0.19298356773047148,
+ "learning_rate": 1.9858637813204352e-06,
+ "loss": 1.9649,
+ "step": 40
+ },
+ {
+ "epoch": 0.04886769964243146,
+ "grad_norm": 0.18616123249372152,
+ "learning_rate": 1.6191189510815942e-06,
+ "loss": 1.988,
+ "step": 41
+ },
+ {
+ "epoch": 0.050059594755661505,
+ "grad_norm": 0.17608440957049246,
+ "learning_rate": 1.286812958766106e-06,
+ "loss": 1.9493,
+ "step": 42
+ },
+ {
+ "epoch": 0.05125148986889154,
+ "grad_norm": 0.17839806165567934,
+ "learning_rate": 9.903113209758098e-07,
+ "loss": 1.9827,
+ "step": 43
+ },
+ {
+ "epoch": 0.052443384982121574,
+ "grad_norm": 0.171992208968052,
+ "learning_rate": 7.308324265397837e-07,
+ "loss": 2.0005,
+ "step": 44
+ },
+ {
+ "epoch": 0.05363528009535161,
+ "grad_norm": 0.20157026593925115,
+ "learning_rate": 5.094425298933136e-07,
+ "loss": 1.9962,
+ "step": 45
+ },
+ {
+ "epoch": 0.054827175208581644,
+ "grad_norm": 0.17529676457850735,
+ "learning_rate": 3.2705136960970554e-07,
+ "loss": 2.0125,
+ "step": 46
+ },
+ {
+ "epoch": 0.05601907032181168,
+ "grad_norm": 0.1769836489001342,
+ "learning_rate": 1.844084300893456e-07,
+ "loss": 1.9917,
+ "step": 47
+ },
+ {
+ "epoch": 0.057210965435041714,
+ "grad_norm": 0.17274305549148233,
+ "learning_rate": 8.209986176753947e-08,
+ "loss": 1.9941,
+ "step": 48
+ },
+ {
+ "epoch": 0.058402860548271755,
+ "grad_norm": 0.1777317862287471,
+ "learning_rate": 2.054607249663665e-08,
+ "loss": 1.9912,
+ "step": 49
+ },
+ {
+ "epoch": 0.05959475566150179,
+ "grad_norm": 0.17680142652753666,
+ "learning_rate": 0.0,
+ "loss": 2.0301,
+ "step": 50
+ },
+ {
+ "epoch": 0.05959475566150179,
+ "step": 50,
+ "total_flos": 106890730143744.0,
+ "train_loss": 1.9979763078689574,
+ "train_runtime": 1596.1705,
+ "train_samples_per_second": 58.139,
+ "train_steps_per_second": 0.031
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 50,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 106890730143744.0,
+ "train_batch_size": 58,
+ "trial_name": null,
+ "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b187f08a9250c53419936909c7d1b21981cac2e57ac1118501185306c3fc8f5
+ size 7224
training_loss.png ADDED