base_model: meta-llama/Meta-Llama-3.1-8B
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

datasets:
  - path: /workspace/datasets/openhermes_200k_unfiltered/Open_Hermes_200k_unfiltered.jsonl
    type: sharegpt
    conversation: chatml
  - path: /workspace/datasets/magpie_function_calling/magpie_function_calling.jsonl
    type: sharegpt
    conversation: chatml

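# Spectrum-style selective unfreezing: only the parameters matched below are
# trained; the rest of the model stays frozen. The layer lists (16 of 32
# layers per projection type) look like the output of a Spectrum top-50% SNR
# scan, with lm_head and embed_tokens included so newly added chat tokens can
# be learned.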
unfrozen_parameters:
- ^lm_head.weight$
- ^model.embed_tokens.weight$
# input_layernorm layers
- model.layers.0.input_layernorm
- model.layers.1.input_layernorm
- model.layers.2.input_layernorm
- model.layers.3.input_layernorm
- model.layers.4.input_layernorm
- model.layers.5.input_layernorm
- model.layers.6.input_layernorm
- model.layers.7.input_layernorm
- model.layers.8.input_layernorm
- model.layers.9.input_layernorm
- model.layers.10.input_layernorm
- model.layers.11.input_layernorm
- model.layers.12.input_layernorm
- model.layers.13.input_layernorm
- model.layers.14.input_layernorm
- model.layers.15.input_layernorm
# mlp.down_proj layers
- model.layers.1.mlp.down_proj
- model.layers.0.mlp.down_proj
- model.layers.30.mlp.down_proj
- model.layers.2.mlp.down_proj
- model.layers.21.mlp.down_proj
- model.layers.22.mlp.down_proj
- model.layers.29.mlp.down_proj
- model.layers.5.mlp.down_proj
- model.layers.4.mlp.down_proj
- model.layers.20.mlp.down_proj
- model.layers.23.mlp.down_proj
- model.layers.19.mlp.down_proj
- model.layers.3.mlp.down_proj
- model.layers.17.mlp.down_proj
- model.layers.6.mlp.down_proj
- model.layers.31.mlp.down_proj
# mlp.gate_proj layers
- model.layers.1.mlp.gate_proj
- model.layers.2.mlp.gate_proj
- model.layers.3.mlp.gate_proj
- model.layers.4.mlp.gate_proj
- model.layers.0.mlp.gate_proj
- model.layers.25.mlp.gate_proj
- model.layers.26.mlp.gate_proj
- model.layers.5.mlp.gate_proj
- model.layers.24.mlp.gate_proj
- model.layers.28.mlp.gate_proj
- model.layers.23.mlp.gate_proj
- model.layers.27.mlp.gate_proj
- model.layers.21.mlp.gate_proj
- model.layers.22.mlp.gate_proj
- model.layers.29.mlp.gate_proj
- model.layers.20.mlp.gate_proj
# mlp.up_proj layers
- model.layers.4.mlp.up_proj
- model.layers.3.mlp.up_proj
- model.layers.0.mlp.up_proj
- model.layers.5.mlp.up_proj
- model.layers.7.mlp.up_proj
- model.layers.6.mlp.up_proj
- model.layers.2.mlp.up_proj
- model.layers.1.mlp.up_proj
- model.layers.8.mlp.up_proj
- model.layers.12.mlp.up_proj
- model.layers.14.mlp.up_proj
- model.layers.9.mlp.up_proj
- model.layers.15.mlp.up_proj
- model.layers.17.mlp.up_proj
- model.layers.13.mlp.up_proj
- model.layers.19.mlp.up_proj
# post_attention_layernorm layers
- model.layers.0.post_attention_layernorm
- model.layers.1.post_attention_layernorm
- model.layers.2.post_attention_layernorm
- model.layers.3.post_attention_layernorm
- model.layers.4.post_attention_layernorm
- model.layers.5.post_attention_layernorm
- model.layers.6.post_attention_layernorm
- model.layers.7.post_attention_layernorm
- model.layers.8.post_attention_layernorm
- model.layers.9.post_attention_layernorm
- model.layers.10.post_attention_layernorm
- model.layers.11.post_attention_layernorm
- model.layers.12.post_attention_layernorm
- model.layers.13.post_attention_layernorm
- model.layers.14.post_attention_layernorm
- model.layers.15.post_attention_layernorm
# self_attn.k_proj layers
- model.layers.29.self_attn.k_proj
- model.layers.25.self_attn.k_proj
- model.layers.23.self_attn.k_proj
- model.layers.28.self_attn.k_proj
- model.layers.21.self_attn.k_proj
- model.layers.19.self_attn.k_proj
- model.layers.22.self_attn.k_proj
- model.layers.20.self_attn.k_proj
- model.layers.24.self_attn.k_proj
- model.layers.31.self_attn.k_proj
- model.layers.27.self_attn.k_proj
- model.layers.26.self_attn.k_proj
- model.layers.17.self_attn.k_proj
- model.layers.11.self_attn.k_proj
- model.layers.18.self_attn.k_proj
- model.layers.14.self_attn.k_proj
# self_attn.o_proj layers
- model.layers.14.self_attn.o_proj
- model.layers.7.self_attn.o_proj
- model.layers.5.self_attn.o_proj
- model.layers.11.self_attn.o_proj
- model.layers.6.self_attn.o_proj
- model.layers.24.self_attn.o_proj
- model.layers.9.self_attn.o_proj
- model.layers.13.self_attn.o_proj
- model.layers.10.self_attn.o_proj
- model.layers.12.self_attn.o_proj
- model.layers.8.self_attn.o_proj
- model.layers.25.self_attn.o_proj
- model.layers.21.self_attn.o_proj
- model.layers.23.self_attn.o_proj
- model.layers.15.self_attn.o_proj
- model.layers.16.self_attn.o_proj
# self_attn.q_proj layers
- model.layers.8.self_attn.q_proj
- model.layers.13.self_attn.q_proj
- model.layers.9.self_attn.q_proj
- model.layers.14.self_attn.q_proj
- model.layers.10.self_attn.q_proj
- model.layers.11.self_attn.q_proj
- model.layers.0.self_attn.q_proj
- model.layers.15.self_attn.q_proj
- model.layers.1.self_attn.q_proj
- model.layers.6.self_attn.q_proj
- model.layers.5.self_attn.q_proj
- model.layers.7.self_attn.q_proj
- model.layers.12.self_attn.q_proj
- model.layers.16.self_attn.q_proj
- model.layers.17.self_attn.q_proj
- model.layers.26.self_attn.q_proj
# self_attn.v_proj layers
- model.layers.26.self_attn.v_proj
- model.layers.17.self_attn.v_proj
- model.layers.3.self_attn.v_proj
- model.layers.28.self_attn.v_proj
- model.layers.29.self_attn.v_proj
- model.layers.21.self_attn.v_proj
- model.layers.15.self_attn.v_proj
- model.layers.16.self_attn.v_proj
- model.layers.20.self_attn.v_proj
- model.layers.25.self_attn.v_proj
- model.layers.6.self_attn.v_proj
- model.layers.23.self_attn.v_proj
- model.layers.4.self_attn.v_proj
- model.layers.1.self_attn.v_proj
- model.layers.22.self_attn.v_proj
- model.layers.14.self_attn.v_proj

chat_template: chatml
val_set_size: 0.01
output_dir: llama_3.1_8b_function_calling
data_seed: 49
seed: 49

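# Sample packing concatenates multiple short conversations into each
# 8192-token sequence so less compute is spent on padding.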
sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project: function-calling-spectrum
wandb_entity: therealagi-reviewramp
wandb_watch: gradients
wandb_name:
wandb_log_model:

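# Effective per-GPU batch: micro_batch_size (2) x gradient_accumulation_steps
# (4) = 8 sequences; multiply by GPU count for the global batch size.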
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 3
optimizer: adamw_torch
lr_scheduler: linear
learning_rate: 5e-6

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
auto_resume_from_checkpoints: true

warmup_steps: 10
evals_per_epoch: 2
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 2
save_total_limit: 2
debug:

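# DeepSpeed ZeRO-3 with parameters offloaded to CPU; this JSON ships with
# Axolotl under deepspeed_configs/.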
deepspeed: /workspace/axolotl/deepspeed_configs/zero3_bf16_cpuoffload_params.json
weight_decay: 0.05
fsdp:
fsdp_config:

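# "<|im_end|>" and "<|endoftext|>" are not in the base Llama 3.1 vocabulary;
# they are added here, which is presumably why embed_tokens and lm_head are
# unfrozen above.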
special_tokens:
  pad_token: "<|endoftext|>"
  eos_token: "<|im_end|>"
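
# To launch (assuming this file is saved as config.yml and a standard Axolotl
# install):
#   accelerate launch -m axolotl.cli.train config.yml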