config_vae_humanml3d.yaml
NAME: 1222_PELearn_VAE_MEncDec49_MdiffEnc49_bs64_clip_uncond75_01 # Experiment name
DEBUG: False # Debug mode
ACCELERATOR: 'gpu' # Accelerator options: "cpu", "gpu", "tpu", "ipu", "hpu", "mps", "auto"
DEVICE: [0] # Indices of GPUs, e.g. [0] or [0,1,2,3]
# Training configuration
TRAIN:
  #---------------------------------
  STAGE: vae # Training stage: "vae", "diffusion", or "vae_diffusion"
  #---------------------------------
  ABLATION:
    SKIP_CONNECT: True
    PE_TYPE: mld
    DIFF_PE_TYPE: mld
  DATASETS: ['humanml3d'] # Training datasets
  NUM_WORKERS: 11 # Number of workers
  BATCH_SIZE: 128 # Size of batches
  START_EPOCH: 0 # Start epoch
  END_EPOCH: 6000 # End epoch
  RESUME: '' # Resume training from this path
  PRETRAINED_VAE: ''
  OPTIM:
    TYPE: AdamW # Optimizer type
    LR: 1e-4 # Learning rate
# Evaluation configuration
EVAL:
  DATASETS: ['humanml3d'] # Evaluation datasets
  BATCH_SIZE: 32 # Evaluation batch size
  SPLIT: test
# Test configuration
TEST:
  CHECKPOINTS: '' # Pretrained model path
  DATASETS: ['humanml3d'] # Test datasets
  SPLIT: test
  BATCH_SIZE: 32 # Test batch size
  MEAN: False
  NUM_SAMPLES: 1
  FACT: 1
# Dataset configuration
DATASET:
  JOINT_TYPE: 'humanml3d' # Joint type
METRIC:
  TYPE: ['TemosMetric', 'TM2TMetrics']
# Loss configuration
LOSS:
  TYPE: mld # Loss type
  LAMBDA_LATENT: 1.0e-5 # Lambda for latent losses
  LAMBDA_KL: 1.0e-4 # Lambda for KL losses
  LAMBDA_REC: 1.0 # Lambda for reconstruction losses
  LAMBDA_GEN: 1.0 # Lambda for text-motion generation losses
  LAMBDA_CROSS: 1.0 # Lambda for cross-reconstruction losses
  LAMBDA_CYCLE: 0.0 # Lambda for cycle losses
  LAMBDA_PRIOR: 0.0 # Lambda for prior losses
  DIST_SYNC_ON_STEP: False # Sync losses across devices at each step when training distributed
# Model configuration
model:
  vae: true # Whether to use the VAE model
  model_type: mld # Model type
  condition: 'text'
  latent_dim: [1, 256] # Latent dimension
  ff_size: 1024 # Feed-forward size
  num_layers: 9 # Number of layers
  num_head: 4 # Number of attention heads
  droupout: 0.1 # Dropout rate
  activation: gelu # Activation type
  guidance_scale: 7.5 # Classifier-free guidance scale
  guidance_uncondp: 0.1 # Probability of unconditioned guidance, e.g. 0.1 or 0.25
# Logger configuration
LOGGER:
  SACE_CHECKPOINT_EPOCH: 200 # Save a checkpoint every N epochs
  LOG_EVERY_STEPS: 1
  VAL_EVERY_STEPS: 200
  TENSORBOARD: True
  WANDB:
    PROJECT: null
    OFFLINE: False
    RESUME_ID: null
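# Usage sketch (assumption, not taken from this file): a config like this is
# typically passed to the repository's training entry point for the VAE stage.
# The script name and flags below are illustrative only; check the project README
# for the exact command.
#   python -m train --cfg configs/config_vae_humanml3d.yaml --cfg_assets configs/assets.yaml --nodebug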