# Hydra training config: VQ-BeT policy on LIBERO goal tasks (project: dynamo-repro).
defaults:
  # Config groups composed first; _self_ last so values in this file take precedence.
  - env_vars: env_vars
  - encoder: libero_dynamo
  - _self_

seed: 42
window_size: 10
goal_window_size: 10
eval_window_size: 10
batch_size: 64
epochs: 50
eval_freq: 10
eval_on_env_freq: 5
num_env_evals: 10
num_final_evals: 10
num_final_eval_per_goal: 10
action_window_size: 1

wandb:
  project: dynamo-repro
  entity: ${env_vars.wandb_entity}

train_fraction: 0.9
device: cuda

# Optimizer hyperparameters.
optim:
  lr: 5.5e-5
  weight_decay: 2e-4
  betas: [0.9, 0.999]

env:
  gym:
    _target_: envs.libero.libero_env.LiberoEnv
    id: libero_goal
  obs_dim: 60
  act_dim: 7
  views: 2

dataset:
  _target_: datasets.libero.LiberoGoalDataset
  data_directory: ${env_vars.datasets.libero}

# Windowing and goal-conditioning options for the dataloader.
data:
  window_size: ${window_size}
  action_window_size: ${action_window_size}
  vqbet_get_future_action_chunk: true
  goal_conditional: future
  future_seq_len: ${goal_window_size}
  min_future_sep: ${action_window_size}
  use_libero_goal: true

save_every: 100
save_path: "${env_vars.save_path}/checkpoints/${env.gym.id}/${now:%Y-%m-%d}/${now:%H-%M-%S}"

# VQ-BeT behavior transformer; the vqvae_* keys configure the residual VQ
# action tokenizer, the n_* keys the GPT-style trunk.
model:
  _target_: models.vq_behavior_transformer.bet.BehaviorTransformer
  obs_dim: ${encoder.output_dim}
  act_dim: ${env.act_dim}
  goal_dim: ${encoder.output_dim}
  views: ${env.views}
  vqvae_latent_dim: 512
  vqvae_n_embed: 16
  vqvae_groups: 2
  vqvae_fit_steps: 884
  vqvae_iters: 300
  n_layer: 6
  n_head: 6
  n_embd: 120
  vqvae_batch_size: 2048
  act_scale: 1
  obs_window_size: ${window_size}
  act_window_size: ${action_window_size}
  offset_loss_multiplier: 100
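
# ---------------------------------------------------------------------------
# Usage sketch. Assumptions (not taken from this file): the repo exposes a
# train.py entry point decorated with @hydra.main and pointed at this config;
# the config_name below is illustrative. Hydra composes the `defaults` groups,
# resolves the ${...} interpolations, and instantiates each `_target_` class:
#
#   import hydra
#   from omegaconf import DictConfig
#
#   @hydra.main(config_path=".", config_name="train", version_base=None)
#   def main(cfg: DictConfig) -> None:
#       model = hydra.utils.instantiate(cfg.model)      # -> BehaviorTransformer
#       env = hydra.utils.instantiate(cfg.env.gym)      # -> LiberoEnv
#       dataset = hydra.utils.instantiate(cfg.dataset)  # -> LiberoGoalDataset
#
#   if __name__ == "__main__":
#       main()
#
# Any key can then be overridden per run from the command line, e.g.:
#   python train.py batch_size=128 optim.lr=1e-4 model.vqvae_n_embed=32
# ---------------------------------------------------------------------------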