diff --git "a/mttl_config.json" "b/mttl_config.json" new file mode 100644--- /dev/null +++ "b/mttl_config.json" @@ -0,0 +1 @@ +{"base_model": "microsoft/Phi-3.5-mini-instruct", "class_name": "mttl.models.expert_model.MultiExpertModelConfig", "default_expert_name": null, "expert_infos": [{"expert_name": "information_extraction_and_comprehension_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "information_extraction_and_comprehension_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "information_extraction_and_comprehension_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_15", "exp_name": null, "expert_name": "information_extraction_and_comprehension_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 11, "total_steps": 195, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": 
"phi3.5_tag_information_extraction_and_comprehension_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "information_extraction_and_transformation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "information_extraction_and_transformation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "information_extraction_and_transformation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_16", "exp_name": null, "expert_name": "information_extraction_and_transformation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, 
"backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 5, "total_steps": 85, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_information_extraction_and_transformation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "diverse_question-answer_tasks_with_contextual_understanding", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "diverse_question-answer_tasks_with_contextual_understanding", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "diverse_question-answer_tasks_with_contextual_understanding", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", 
"arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_5", "exp_name": null, "expert_name": "diverse_question-answer_tasks_with_contextual_understanding", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 18, "total_steps": 315, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_diverse_question-answer_tasks_with_contextual_understanding", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "summarization_and_condensation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "summarization_and_condensation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": 
"local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "summarization_and_condensation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_25", "exp_name": null, "expert_name": "summarization_and_condensation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 10, "total_steps": 170, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_summarization_and_condensation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "generating_questions_based_on_given_information", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "generating_questions_based_on_given_information", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": 
"mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "generating_questions_based_on_given_information", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_12", "exp_name": null, "expert_name": "generating_questions_based_on_given_information", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 37, "total_steps": 625, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_generating_questions_based_on_given_information", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", 
"model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "answering_specific_questions_based_on_provided_information", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "answering_specific_questions_based_on_provided_information", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "answering_specific_questions_based_on_provided_information", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_0", "exp_name": null, "expert_name": "answering_specific_questions_based_on_provided_information", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 160, "total_steps": 2675, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_answering_specific_questions_based_on_provided_information", "tensorboard": false, "remote_token": null, "library_id": 
"hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "generating_summaries_and_titles_for_texts", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "generating_summaries_and_titles_for_texts", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "generating_summaries_and_titles_for_texts", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_13", "exp_name": null, "expert_name": "generating_summaries_and_titles_for_texts", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, 
"weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 30, "total_steps": 515, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_generating_summaries_and_titles_for_texts", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "summarization_and_title_generation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "summarization_and_title_generation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "summarization_and_title_generation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, 
"attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_26", "exp_name": null, "expert_name": "summarization_and_title_generation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 160, "total_steps": 2675, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_summarization_and_title_generation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "evaluative_and_interpretative_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "evaluative_and_interpretative_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, 
"add_eos_to_downstream_targets": true, "finetune_task_name": "evaluative_and_interpretative_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_9", "exp_name": null, "expert_name": "evaluative_and_interpretative_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 24, "total_steps": 410, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_evaluative_and_interpretative_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "short_story_continuation_and_completion_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "short_story_continuation_and_completion_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 
0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "short_story_continuation_and_completion_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_23", "exp_name": null, "expert_name": "short_story_continuation_and_completion_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 30, "total_steps": 510, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_short_story_continuation_and_completion_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "generating_and_extracting_textual_content", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "generating_and_extracting_textual_content", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": 
"mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "generating_and_extracting_textual_content", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_11", "exp_name": null, "expert_name": "generating_and_extracting_textual_content", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 6, "total_steps": 115, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_generating_and_extracting_textual_content", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, 
"eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "logical_reasoning_and_inference_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "logical_reasoning_and_inference_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "logical_reasoning_and_inference_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_17", "exp_name": null, "expert_name": "logical_reasoning_and_inference_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 92, "total_steps": 1535, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": 
"phi3.5_tag_logical_reasoning_and_inference_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "identifying_and_analyzing_pronoun_references_and_narrators", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "identifying_and_analyzing_pronoun_references_and_narrators", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "identifying_and_analyzing_pronoun_references_and_narrators", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_14", "exp_name": null, "expert_name": "identifying_and_analyzing_pronoun_references_and_narrators", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, 
"checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 9, "total_steps": 165, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_identifying_and_analyzing_pronoun_references_and_narrators", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "extracting_specific_information_from_textual_contexts", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "extracting_specific_information_from_textual_contexts", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "extracting_specific_information_from_textual_contexts", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", 
"task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_10", "exp_name": null, "expert_name": "extracting_specific_information_from_textual_contexts", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 329, "total_steps": 5495, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_extracting_specific_information_from_textual_contexts", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "concise_question-answering_and_information_extraction", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "concise_question-answering_and_information_extraction", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": 
"local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "concise_question-answering_and_information_extraction", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_2", "exp_name": null, "expert_name": "concise_question-answering_and_information_extraction", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 3, "total_steps": 50, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_concise_question-answering_and_information_extraction", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "diverse_question-answer_and_translation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "diverse_question-answer_and_translation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": 
{"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "diverse_question-answer_and_translation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_4", "exp_name": null, "expert_name": "diverse_question-answer_and_translation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 0, "total_steps": 10, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_diverse_question-answer_and_translation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": 
"flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "mathematical_problem_solving_and_calculation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "mathematical_problem_solving_and_calculation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "mathematical_problem_solving_and_calculation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_18", "exp_name": null, "expert_name": "mathematical_problem_solving_and_calculation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 147, "total_steps": 2465, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_mathematical_problem_solving_and_calculation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", 
"destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "evaluating_and_comparing_textual_information", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "evaluating_and_comparing_textual_information", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "evaluating_and_comparing_textual_information", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_8", "exp_name": null, "expert_name": "evaluating_and_comparing_textual_information", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 
1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 28, "total_steps": 470, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_evaluating_and_comparing_textual_information", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "error_identification_and_sequence_validation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "error_identification_and_sequence_validation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "error_identification_and_sequence_validation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, 
"attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_6", "exp_name": null, "expert_name": "error_identification_and_sequence_validation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 8, "total_steps": 135, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_error_identification_and_sequence_validation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "text_formatting_and_correction_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "text_formatting_and_correction_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, 
"add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "text_formatting_and_correction_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_27", "exp_name": null, "expert_name": "text_formatting_and_correction_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 21, "total_steps": 350, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_text_formatting_and_correction_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "multilingual_translation_and_interpretation_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "multilingual_translation_and_interpretation_tasks", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 
16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "multilingual_translation_and_interpretation_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_19", "exp_name": null, "expert_name": "multilingual_translation_and_interpretation_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 128, "total_steps": 2135, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_multilingual_translation_and_interpretation_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "classification_and_categorization_tasks", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "classification_and_categorization_tasks", "parent_node": null, "expert_config": 
{"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "classification_and_categorization_tasks", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_1", "exp_name": null, "expert_name": "classification_and_categorization_tasks", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 36, "total_steps": 600, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_classification_and_categorization_tasks", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, 
"eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "sentiment_analysis_and_review_classification", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "sentiment_analysis_and_review_classification", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "sentiment_analysis_and_review_classification", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_22", "exp_name": null, "expert_name": "sentiment_analysis_and_review_classification", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 94, "total_steps": 1570, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", 
"monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_sentiment_analysis_and_review_classification", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "creative_and_contextual_writing_prompts", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "creative_and_contextual_writing_prompts", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "creative_and_contextual_writing_prompts", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_3", "exp_name": null, "expert_name": "creative_and_contextual_writing_prompts", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, 
"checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 49, "total_steps": 820, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_creative_and_contextual_writing_prompts", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "evaluating_and_comparing_sentence_meanings_and_contexts", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "evaluating_and_comparing_sentence_meanings_and_contexts", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", "data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "evaluating_and_comparing_sentence_meanings_and_contexts", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", 
"arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_7", "exp_name": null, "expert_name": "evaluating_and_comparing_sentence_meanings_and_contexts", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 76, "total_steps": 1270, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_evaluating_and_comparing_sentence_meanings_and_contexts", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}, {"expert_name": "step-by-step_how-to_guides", "class_name": "mttl.models.library.expert_library.MetadataEntry", "expert_task_name": "step-by-step_how-to_guides", "parent_node": null, "expert_config": {"modify_modules": ".*", "class_name": "mttl.models.modifiers.lora.LoRAConfig", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false}, "training_config": {"max_input_length": 2048, "class_name": "mttl.arguments.ExpertConfig", "tokenizer": null, "model_family": "gpt", "modify_modules": ".*", "modify_layers": "qkv_proj|o_proj|down_proj|gate_up_proj", "tie_params": null, "model": "microsoft/Phi-3.5-mini-instruct", "soft_prompt_length": 10, "soft_prompt_learn_kv": true, "n_tasks": null, "patch_last_k_layers": -1, "lora_rank": 16, "lora_alpha": 32, "lora_dropout": 0.05, "lora_init_b_random": false, "n_skills": 1, "n_splits": 1, "phi_2_align_heads": false, "n_embd": 2560, "n_heads": 8, "moe_num_experts": 100, "emb_dim": 128, "down_proj_layer": "fc1", "up_proj_layer": "fc2", "prompt_placement": "prefix", "dataset": "local:///mnt/default/data/modular_chatbot/pretty_orca_gpt-4o-gs_tags-30_contrastive_inferred_gpt-4o-mini_n300000", 
"data_dir": "/tmp/", "train_batch_size": 1, "predict_batch_size": 10, "max_output_length": 1024, "validation_portion": null, "padding_side": "right", "truncation_side": "left", "train_on_inputs": false, "add_eos_to_targets": true, "add_eos_to_downstream_targets": true, "finetune_task_name": "step-by-step_how-to_guides", "subsample_train": null, "subsample_dev": null, "subsample_test": null, "subsample_per_task": false, "subsample": -1, "pack_sequences": true, "pad_to_multiple_of": 8, "max_seq_per_pack": 4, "task_id_field": "task_id", "task_name_field": "tag", "task_source_field": "tag", "arc_type": "ARC-Easy", "few_shot": true, "augment_mmlu": false, "source_template": null, "augment_few_shot": 0, "include_template_type": "*", "include_task_source": "P3,Flan2021,CoT", "remove_phi_eval_tasks": false, "attn_implementation": null, "device_map": "cuda:0", "load_in_4bit": false, "load_in_8bit": 0, "do_train": true, "cache_dir": "./cache", "output_dir": "/mnt/output/projects/modular-chatbot/amlt-results/17c8a0c6-0542-4969-9ff4-47bfb37700ec_24", "exp_name": null, "expert_name": "step-by-step_how-to_guides", "micro_batch_size": 1, "compute_strategy": "auto", "scheduler": "linear_decay_with_warmup", "checkpoint": null, "checkpoint_step": null, "backbone_checkpoint": null, "learning_rate": 0.0001, "warmup_proportion": 0.06, "trainable_param_names": ".*lora_[ab].*", "non_trainable_param_names": null, "weight_decay": 0.0, "adam_epsilon": 1e-08, "max_grad_norm": 0.1, "optimizer": "adamw", "adafactor_scale_parameter": true, "adafactor_warmup_init": false, "adafactor_relative_step": false, "num_train_epochs": 5, "warmup_steps": 30, "total_steps": 505, "num_tasks_per_batch": null, "save_every": null, "save_each_epoch": false, "eval_every": null, "eval_every_n_epoch": 1, "seed": 42, "debug": false, "precision": "bf16", "monitor_grad_alignment_on": null, "wandb_project": null, "wandb_run_name": "phi3.5_tag_step-by-step_how-to_guides", "tensorboard": false, "remote_token": null, "library_id": "hf://sordonia/Phi-3.5-mini-instruct-28Aug", "destination_library_id": null, "logging_prefix": "", "router_weight_decay": null, "router_learning_rate": null, "module_logits_relaxed_bernoulli": true, "module_logits_straight_through": false, "module_logits_learning_rate": 0.1, "adapters_learning_rate": null, "adapters_weight_decay": null, "module_logits_dropout": 0.0, "module_logits_l2_norm": false, "eval_mmlu_few_shot": true, "eval_mmlu_flag": false, "eval_rouge_flag": false, "eval_before_training": true, "create_transfer_matrix": false, "pipeline_eval_tasks": "arc-challenge,arc-easy,boolq,hellaswag,humaneval,mbpp,openbookqa,piqa,bbh-fast,winogrande", "save_if_loaded_from_ckpt": true, "dataset_type": "flat_multitask", "model_modifier": "lora"}, "expert_model": "microsoft/Phi-3.5-mini-instruct", "expert_deleted": false}], "selector_config": {"router_granularity": "*", "class_name": "mttl.models.containers.selectors.arrow_selector.ArrowSelectorConfig", "lora_merge_after": false, "selector_logging": false, "num_experts": 0, "library_id": "sordonia/Phi-3.5-mini-instruct-28Aug", "selector_data_id": null, "router_temp": 1.0, "top_k": 2, "proto_init": "arrow", "input_norm_fn": "id", "proto_norm_fn": "id"}} \ No newline at end of file