[Comparison plot]

Note: More models will be added to this comparison.


Built with Axolotl

See axolotl config

axolotl version: 0.8.0.dev0

```yaml
mlflow_tracking_uri: http://127.0.0.1:7860
mlflow_experiment_name: Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-LoRA

# Hugging Face saving config
hub_model_id: BeaverAI/Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-LoRA-WS
hub_strategy: every_save

# Model checkpointing config
output_dir: ./Outputs/Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-LoRA
resume_from_checkpoint:
save_steps: 10
save_safetensors: true
save_total_limit: 3
save_only_model: false

# Model architecture config
base_model: Qwen/Qwen2.5-1.5B-Instruct
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

# Mixed precision training config
bf16: true
fp16: false
tf32: false

# Model loading config
load_in_8bit: false
load_in_4bit: false
strict: false

# Sequence config
sequence_len: 8192
min_sample_len: 256
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true
train_on_inputs: false
group_by_length: false

# LoRA adapter config
adapter: lora
lora_model_dir:
lora_r: 128
lora_alpha: 128
lora_dropout: 0.125
peft_layers_to_transform:
peft_use_dora:
peft_use_rslora:
peft_layer_replication:
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
lora_modules_to_save:

# Fix uninitialized tokens (such as <|start_header_id|> on the base L3 models)
fix_untrained_tokens:

# Dataset config
# https://github.com/xzuyn/axolotl/blob/prompt_formats/src/axolotl/prompt_strategies/customchatml-regex-last-only.py
datasets:
  - path: PJMixers-Dev/allura-org_gryphe-sonnet-3.5-charcards-names-added-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/anthracite-org_c2_logs_32k_llama3_qwen2_v1.3-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/grimulkan_aicg-logs-augmented-system-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/grimulkan_jannie-log-augmented-system-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/grimulkan_PIPPA-augmented-dedup-system-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/lemonilia_LimaRP-Only-NonSus-Simple-CustomShareGPT-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/MinervaAI_Aesir-Preview-Anon-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/NyxKrage_chub-logs-sharegpt-longest-CustomShareGPT-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/PocketDoc_Dans-Prosemaxx-Cowriter-XL-8192-shrunk-l3-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/PocketDoc_Dans-Personamaxx-Rainy-qwq-all-aphrodite-Shuffled
    split: train
    type: customchatml-regex-last-only
test_datasets:
  - path: PJMixers-Dev/allura-org_gryphe-sonnet-3.5-charcards-names-added-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/anthracite-org_c2_logs_32k_llama3_qwen2_v1.3-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/grimulkan_aicg-logs-augmented-system-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/grimulkan_jannie-log-augmented-system-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/grimulkan_PIPPA-augmented-dedup-system-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/lemonilia_LimaRP-Only-NonSus-Simple-CustomShareGPT-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/MinervaAI_Aesir-Preview-Anon-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/NyxKrage_chub-logs-sharegpt-longest-CustomShareGPT-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/PocketDoc_Dans-Prosemaxx-Cowriter-XL-8192-shrunk-l3-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
  - path: PJMixers-Dev/PocketDoc_Dans-Personamaxx-Rainy-qwq-all-aphrodite-Shuffled
    split: test
    type: customchatml-regex-last-only
val_set_size: 0
eval_strategy: steps
eval_steps: 10
dataset_prepared_path: ./00-Tokenized-Datasets/Qwen2.5-QwQ-Draft-v0.2-1.5B-customchatml-regex-last-only
shuffle_merged_datasets: true
dataset_processes:

# Training hyperparameters
num_epochs: 2
gradient_accumulation_steps: 1
micro_batch_size: 16  # x4 GPUs = 64
eval_batch_size: 16  # x4 GPUs = 64
warmup_steps: 0
optimizer: came_pytorch
optim_args:
optim_target_modules:
lr_scheduler: rex
learning_rate: 2e-5
cosine_min_lr_ratio:
loraplus_lr_ratio:
loraplus_lr_embedding:
weight_decay: 0.1
max_grad_norm: 1
logging_steps: 1

# Model optimization
gradient_checkpointing: unsloth
flash_attention: true
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
cut_cross_entropy: true
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
liger_cross_entropy: false
liger_fused_linear_cross_entropy: false
lora_mlp_kernel: false
lora_qkv_kernel: false
lora_o_kernel: false

# DeepSpeed
deepspeed: deepspeed_configs/zero3_bf16.json

# Garbage Collection
gc_steps: 1

# Debug config
debug: true
seed: 42

# Token config
special_tokens:
  eos_token: "<|endoftext|>"
  pad_token: "<|endoftext|>"
tokens:
```
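
For a sense of the adapter's size: with `lora_r: 128` applied to all seven projection modules, the trainable parameter count can be estimated from the base model's layer shapes. Below is a rough sketch assuming Qwen2.5-1.5B's published dimensions (28 layers, hidden size 1536, 12 query and 2 key-value heads of head dim 128, MLP intermediate size 8960); these numbers come from the base model's config, not from this card:

```python
# Rough LoRA parameter-count estimate for this config (r=128 on all projections).
# Architecture numbers below are assumed from Qwen2.5-1.5B, not stated in this card.
r = 128
hidden, intermediate, n_layers = 1536, 8960, 28
head_dim, n_q_heads, n_kv_heads = 128, 12, 2

# (in_features, out_features) for each targeted module.
shapes = {
    "q_proj": (hidden, n_q_heads * head_dim),   # 1536 -> 1536
    "k_proj": (hidden, n_kv_heads * head_dim),  # 1536 -> 256 (GQA)
    "v_proj": (hidden, n_kv_heads * head_dim),
    "o_proj": (n_q_heads * head_dim, hidden),
    "gate_proj": (hidden, intermediate),
    "up_proj": (hidden, intermediate),
    "down_proj": (intermediate, hidden),
}

# Each LoRA A/B pair adds r * (d_in + d_out) parameters per module per layer.
per_layer = sum(r * (d_in + d_out) for d_in, d_out in shapes.values())
print(f"~{per_layer * n_layers / 1e6:.1f}M trainable parameters")  # ~147.7M with these assumptions
```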

Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-LoRA-WS

This model is a fine-tuned version of Qwen/Qwen2.5-1.5B-Instruct on the following datasets:

  • PJMixers-Dev/allura-org_gryphe-sonnet-3.5-charcards-names-added-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/anthracite-org_c2_logs_32k_llama3_qwen2_v1.3-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/grimulkan_aicg-logs-augmented-system-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/grimulkan_jannie-log-augmented-system-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/grimulkan_PIPPA-augmented-dedup-system-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/lemonilia_LimaRP-Only-NonSus-Simple-CustomShareGPT-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/MinervaAI_Aesir-Preview-Anon-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/NyxKrage_chub-logs-sharegpt-longest-CustomShareGPT-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/PocketDoc_Dans-Prosemaxx-Cowriter-XL-8192-shrunk-l3-qwq-all-aphrodite-Shuffled
  • PJMixers-Dev/PocketDoc_Dans-Personamaxx-Rainy-qwq-all-aphrodite-Shuffled

It achieves the following results on the evaluation set:

  • Loss: 1.7920
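
To try the adapter, load the base model and apply the LoRA weights with PEFT. This is a minimal sketch, assuming transformers, peft, and accelerate are installed; the repo IDs are taken from this card, and the merged output directory name is hypothetical:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model in bf16, matching the training precision.
base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-1.5B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-1.5B-Instruct")

# Apply this card's LoRA adapter on top of the base weights.
model = PeftModel.from_pretrained(base, "BeaverAI/Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-LoRA-WS")

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))

# Optional: merge the adapter into the base weights for standalone serving.
merged = model.merge_and_unload()
merged.save_pretrained("Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-merged")  # hypothetical output dir
tokenizer.save_pretrained("Qwen2.5-QwQ-RP-Draft-v0.2-1.5B-merged")
```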

Model description

More information needed

Intended uses & limitations

More information needed

Training and evaluation data

More information needed

Training procedure

Training hyperparameters

The following hyperparameters were used during training (as auto-reported by the trainer; note that the optimizer and scheduler entries below differ from the Axolotl config above, which specifies came_pytorch and rex). The total batch sizes follow from the per-device settings, as checked right after the list:

  • learning_rate: 2e-05
  • train_batch_size: 16
  • eval_batch_size: 16
  • seed: 42
  • distributed_type: multi-GPU
  • num_devices: 4
  • total_train_batch_size: 64
  • total_eval_batch_size: 64
  • optimizer: AdamW (OptimizerNames.ADAMW_HF) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
  • lr_scheduler_type: cosine
  • num_epochs: 2.0
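
As noted above, the reported totals follow directly from the per-device settings in the config (micro_batch_size: 16, 4 GPUs, gradient_accumulation_steps: 1); a quick arithmetic check:

```python
# Effective batch size = per-device batch x number of GPUs x gradient accumulation steps.
micro_batch_size = 16
num_devices = 4
gradient_accumulation_steps = 1

total_train_batch_size = micro_batch_size * num_devices * gradient_accumulation_steps
assert total_train_batch_size == 64  # matches total_train_batch_size reported above
```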

Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-:|:-:|:-:|:-:|
| 2.7253 | 0.0036 | 1 | 2.6463 |
| 2.4339 | 0.0362 | 10 | 2.3864 |
| 2.2821 | 0.0725 | 20 | 2.2571 |
| 2.207 | 0.1087 | 30 | 2.1771 |
| 2.1213 | 0.1449 | 40 | 2.1225 |
| 2.0926 | 0.1812 | 50 | 2.0870 |
| 2.0274 | 0.2174 | 60 | 2.0585 |
| 2.0292 | 0.2536 | 70 | 2.0338 |
| 2.0057 | 0.2899 | 80 | 2.0148 |
| 1.9841 | 0.3261 | 90 | 1.9975 |
| 1.9639 | 0.3623 | 100 | 1.9812 |
| 1.9303 | 0.3986 | 110 | 1.9688 |
| 1.9527 | 0.4348 | 120 | 1.9569 |
| 1.9389 | 0.4710 | 130 | 1.9459 |
| 1.9008 | 0.5072 | 140 | 1.9352 |
| 1.8996 | 0.5435 | 150 | 1.9259 |
| 1.8849 | 0.5797 | 160 | 1.9171 |
| 1.8793 | 0.6159 | 170 | 1.9105 |
| 1.8748 | 0.6522 | 180 | 1.9019 |
| 1.8697 | 0.6884 | 190 | 1.8959 |
| 1.8777 | 0.7246 | 200 | 1.8892 |
| 1.8731 | 0.7609 | 210 | 1.8833 |
| 1.8358 | 0.7971 | 220 | 1.8772 |
| 1.8483 | 0.8333 | 230 | 1.8729 |
| 1.8567 | 0.8696 | 240 | 1.8679 |
| 1.8461 | 0.9058 | 250 | 1.8632 |
| 1.8436 | 0.9420 | 260 | 1.8591 |
| 1.7992 | 0.9783 | 270 | 1.8545 |
| 1.7925 | 1.0145 | 280 | 1.8510 |
| 1.814 | 1.0507 | 290 | 1.8461 |
| 1.8249 | 1.0870 | 300 | 1.8431 |
| 1.7889 | 1.1232 | 310 | 1.8391 |
| 1.8116 | 1.1594 | 320 | 1.8367 |
| 1.7895 | 1.1957 | 330 | 1.8339 |
| 1.7951 | 1.2319 | 340 | 1.8300 |
| 1.7831 | 1.2681 | 350 | 1.8272 |
| 1.7919 | 1.3043 | 360 | 1.8250 |
| 1.7755 | 1.3406 | 370 | 1.8224 |
| 1.7895 | 1.3768 | 380 | 1.8209 |
| 1.7806 | 1.4130 | 390 | 1.8184 |
| 1.7808 | 1.4493 | 400 | 1.8156 |
| 1.7771 | 1.4855 | 410 | 1.8136 |
| 1.7636 | 1.5217 | 420 | 1.8118 |
| 1.7719 | 1.5580 | 430 | 1.8098 |
| 1.7716 | 1.5942 | 440 | 1.8074 |
| 1.7917 | 1.6304 | 450 | 1.8056 |
| 1.784 | 1.6667 | 460 | 1.8035 |
| 1.7846 | 1.7029 | 470 | 1.8019 |
| 1.7485 | 1.7391 | 480 | 1.8002 |
| 1.7381 | 1.7754 | 490 | 1.7987 |
| 1.764 | 1.8116 | 500 | 1.7972 |
| 1.744 | 1.8478 | 510 | 1.7964 |
| 1.7673 | 1.8841 | 520 | 1.7948 |
| 1.7926 | 1.9203 | 530 | 1.7935 |
| 1.7843 | 1.9565 | 540 | 1.7926 |
| 1.7689 | 1.9928 | 550 | 1.7920 |

Framework versions

  • PEFT 0.14.0
  • Transformers 4.49.0
  • PyTorch 2.6.0+cu124
  • Datasets 3.2.0
  • Tokenizers 0.21.1