---
# Mergekit SCE merge: fuse three Qwen 32B models on a Qwen2.5-32B base.
models:
  - model: Qwen/Qwen2.5-32B
  - model: Qwen/QwQ-32B
  - model: Qwen/Qwen2.5-Coder-32B-Instruct

merge_method: sce
base_model: Qwen/Qwen2.5-32B

parameters:
  # Keep the top 60% highest-variance delta elements when electing merge signs.
  select_topk: 0.6
  # NOTE(review): `layers_weights` is not a documented mergekit SCE parameter —
  # confirm the consuming tool reads it (unknown keys may be silently ignored).
  # NOTE(review): patterns assume GPT-2-style `transformer.h.N.` tensor names;
  # Qwen2.5 checkpoints typically use `model.layers.N.` — verify against the
  # actual state-dict keys before running.
  layers_weights:
    # Each `value` lists per-model weights in `models` order:
    # [Qwen2.5-32B, QwQ-32B, Qwen2.5-Coder-32B-Instruct].
    # Layers 0-15. Fixed: the original pattern "transformer.h.[0-9]|1[0-5]"
    # lacked grouping, so the alternation matched a bare "1[0-5]" anywhere in
    # any tensor name. Also anchored with a trailing '\.' so "transformer.h.1"
    # cannot substring-match layers 10-19, and escaped literal dots.
    - pattern: 'transformer\.h\.([0-9]|1[0-5])\.'
      value: [0.0, 0.7, 0.3]
    # Layers 16-59.
    - pattern: 'transformer\.h\.(1[6-9]|[2-4][0-9]|5[0-9])\.'
      value: [0.0, 0.9, 0.1]
    # Layers 60-63.
    - pattern: 'transformer\.h\.6[0-3]\.'
      value: [0.0, 0.8, 0.2]
    # Output projection.
    - pattern: 'lm_head'
      value: [0.0, 0.8, 0.2]

dtype: bfloat16