---
base_model:
- allura-org/Qwen2.5-32b-RP-Ink
- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
- Aryanne/QwentileSwap
- Daemontatox/Cogito-Ultima
library_name: transformers
tags:
- mergekit
- merge
---
# Qwetiapin

*There's no 'I' in 'brain damage'*

## Overview

I'll write something here later.

## Quants

## Merge Details

### Merging Steps

#### Step1
```yaml
dtype: bfloat16
tokenizer_source: base
merge_method: della_linear
parameters:
  density: 0.5
  epsilon: 0.4
  lambda: 1.1
base_model: allura-org/Qwen2.5-32b-RP-Ink
models:
  - model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
    parameters:
      weight:
        - filter: v_proj
          value: [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]
        - filter: o_proj
          value: [1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1]
        - filter: up_proj
          value: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        - filter: gate_proj
          value: [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]
        - filter: down_proj
          value: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        - value: 0
  - model: allura-org/Qwen2.5-32b-RP-Ink
    parameters:
      weight:
        - filter: v_proj
          value: [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1]
        - filter: o_proj
          value: [0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0]
        - filter: up_proj
          value: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        - filter: gate_proj
          value: [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1]
        - filter: down_proj
          value: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        - value: 1
```
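The two weight blocks are exact complements: wherever the R1 distill contributes a tensor (value 1) at a given depth, RP-Ink sits it out (value 0), and vice versa, with the trailing bare `value` entries acting as the default weight for any tensor the filters don't match. Each 11-point list is a gradient that mergekit stretches across the layer stack. A quick self-contained sanity check of the complement property, with the lists copied verbatim from the config above (it doesn't touch mergekit at all):

```python
# Step1 weight masks for the two parents; each pair should sum to 1 at
# every gradient point, so each tensor comes wholesale from one model.
r1_distill = {
    "v_proj":    [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
    "o_proj":    [1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1],
    "up_proj":   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    "gate_proj": [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
    "down_proj": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
}
rp_ink = {
    "v_proj":    [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    "o_proj":    [0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0],
    "up_proj":   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    "gate_proj": [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    "down_proj": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
}
for name in r1_distill:
    sums = [a + b for a, b in zip(r1_distill[name], rp_ink[name])]
    assert all(s == 1 for s in sums), f"{name} masks overlap or leave a gap"
# The catch-all defaults (0 and 1) complement each other the same way.
print("masks are exact complements at every gradient point")
```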
#### Step2
```yaml
models:
  - model: Aryanne/QwentileSwap
    parameters:
      weight: [1.0, 0.9, 0.8, 0.9, 1.0]
  - model: Daemontatox/Cogito-Ultima
    parameters:
      weight: [0, 0.1, 0.2, 0.1, 0]
merge_method: nuslerp
parameters:
  nuslerp_row_wise: true
dtype: bfloat16
tokenizer_source: base
```
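Unlike Step1's hard 0/1 masks, these weights are smooth 5-point gradients: Cogito-Ultima contributes nothing at either end of the network and peaks at a 20% share in the middle layers, with QwentileSwap filling the remainder so the pair always sums to 1. A rough illustration of how a gradient list expands to per-layer weights, assuming simple piecewise-linear interpolation over Qwen2.5-32B's 64 layers (the helper is my own sketch, not mergekit internals):

```python
import numpy as np

def expand_gradient(points, num_layers=64):
    # Stretch a mergekit-style gradient list across the layer stack by
    # piecewise-linear interpolation (illustrative, not mergekit's code).
    anchors = np.linspace(0.0, 1.0, len(points))
    layer_pos = np.linspace(0.0, 1.0, num_layers)
    return np.interp(layer_pos, anchors, points)

qwentile = expand_gradient([1.0, 0.9, 0.8, 0.9, 1.0])
cogito = expand_gradient([0.0, 0.1, 0.2, 0.1, 0.0])
# The two shares sum to 1 at every layer; Cogito-Ultima peaks at 0.2
# mid-stack and vanishes at the first and last layers.
print(cogito.round(3))
```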
#### Step3
```yaml
models:
  - model: Step2
  - model: Step1
merge_method: sce
base_model: Step2
parameters:
  select_topk:
    - value: [0.3, 0.35, 0.4, 0.35, 0.2]
dtype: bfloat16
```
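Step3 folds the della_linear merge back into the nuslerp merge with SCE, using the Step2 result as the base. `select_topk` is itself a 5-point gradient, so the fraction of high-variance elements SCE keeps rises from 30% at the bottom of the stack to 40% in the middle, then falls to 20% at the top. Since this config references the earlier outputs by path, reproducing the model is three sequential mergekit runs. A minimal driver sketch, assuming the configs above are saved as `step1.yaml`, `step2.yaml`, and `step3.yaml` (filenames are my own) and roughly following mergekit's documented Python entry point:

```python
import yaml
import torch
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Run the three configs in order. step3.yaml refers to "Step1" and
# "Step2" as model paths, so the first two runs must write there.
for config_path, out_path in [
    ("step1.yaml", "./Step1"),
    ("step2.yaml", "./Step2"),
    ("step3.yaml", "./Qwetiapin"),
]:
    with open(config_path, "r", encoding="utf-8") as fp:
        config = MergeConfiguration.model_validate(yaml.safe_load(fp))
    run_merge(
        config,
        out_path=out_path,
        options=MergeOptions(
            cuda=torch.cuda.is_available(),
            copy_tokenizer=True,
        ),
    )
```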