forked from NVIDIA/Megatron-LM
-
Notifications
You must be signed in to change notification settings - Fork 0
/
pretrain_vision_classify.sh
executable file
·64 lines (55 loc) · 1.68 KB
/
pretrain_vision_classify.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
#! /bin/bash
# Pre-trains a ViT based image classification model.
#
# Usage: fill in the three <Specify ...> placeholders below, then run this
# script from the Megatron-LM repository root.

# Serialize CUDA kernel/comm queues; required by Megatron-LM for correct
# overlap of computation and communication.
export CUDA_DEVICE_MAX_CONNECTIONS=1
# InfiniBand service level used by NCCL collectives.
export NCCL_IB_SL=1

# Training and validation paths should each point to a folder where each
# sub-folder contains a collection of images in jpg or png format
# e.g. If using imagenet, one train image might be, train_data/n01688243/n01688243_11301.JPEG
DATA_PATH_TRAIN=<Specify train data path>
DATA_PATH_VAL=<Specify validation data path>
CHECKPOINT_PATH=<Specify path>

# ViT-Base-like model (12 layers, hidden 768, 12 heads) on 224x224 images.
# With 4x4 patches, sequence length is (224/4)^2 = 3136 tokens per image.
CLASSIFIER_ARGS="
--tensor-model-parallel-size 1 \
--num-layers 12 \
--hidden-size 768 \
--num-attention-heads 12 \
--patch-dim 4 \
--seq-length 3136 \
--max-position-embeddings 3136 \
--img-h 224 \
--img-w 224 \
--mask-factor 1.0 \
--fp16 \
--train-iters 750000 \
--lr-decay-style cosine \
--micro-batch-size 4 \
--global-batch-size 1024 \
--lr 0.0005 \
--min-lr 0.00001 \
--attention-dropout 0.0 \
--weight-decay 0.05 \
--lr-warmup-iters 12500 \
--clip-grad 1.0 \
--no-gradient-accumulation-fusion \
--num-workers 4 \
--DDP-impl torch "

# Image-folder datasets need no tokenizer, hence NullTokenizer / vocab 0.
DATA_ARGS="
--tokenizer-type NullTokenizer \
--vocab-size 0 \
--data-path $DATA_PATH_TRAIN $DATA_PATH_VAL \
--no-data-sharding \
--split 949,50,1 \
"

# Logging, checkpointing and evaluation cadence.
# NOTE: this was previously defined as OUTPUT_ARG but expanded below as
# $OUTPUT_ARGS, which silently dropped all of these flags at launch.
OUTPUT_ARGS="
--log-interval 32 \
--save-interval 10000 \
--eval-interval 2500 \
--eval-iters 100 \
--tensorboard-dir "${CHECKPOINT_PATH}" \
"

# The *_ARGS expansions are intentionally unquoted so the shell word-splits
# them into individual command-line flags.
torchrun pretrain_vision_classification.py \
    $CLASSIFIER_ARGS \
    $DATA_ARGS \
    $OUTPUT_ARGS \
    --save "$CHECKPOINT_PATH" \
    --load "$CHECKPOINT_PATH"