# pretrain-slot-sample-fmt-96.yaml
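# Pretraining configuration: slot-based visual-token sampling over 96 frames
# ("fm96" in output_dir) on top of a Video-LLaMA-2-7B / llama-2-chat backbone,
# trained on the VTG-IT and Valley-72k instruction datasets defined below.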
model:
  arch: vtgllm
  model_type: pretrain_llama_v2
  freeze_vit: True
  freeze_qformer: False
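  # The ViT is kept frozen; the image Q-Former is left trainable for this stage.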
  # Q-Former
  num_query_token: 32
  vit_model: "model/eva-vit-g/eva_vit_g.pth"
  llama_model: "model/Video-LLaMA-2-7B-Finetuned/llama-2-7b-chat-hf/"
  q_former_model: "model/instruct-blip/instruct-blip/instruct_blip_vicuna7b_trimmed.pth"
  ckpt: "model/Video-LLaMA-2-7B-Finetuned/VL_LLaMA_2_7B_Finetuned.pth"
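  # Weights loaded at init (paths relative to the repo root): the EVA-ViT-g
  # vision encoder, LLaMA-2-7B-chat, the InstructBLIP Q-Former, and the
  # finetuned Video-LLaMA-2 checkpoint; all are expected to exist locally.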
  # only train vision branch
  use_video_qformer: False
  frozen_llama_proj: False
  frozen_video_Qformer: False
  sampler_type: 'slot'
  sample_num: 256
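  # Slot-based sampler: compresses the visual tokens from all 96 frames into a
  # fixed budget of 256 tokens (matching strict_video_token_num in the datasets
  # below), so the frame count does not inflate the LLM context. This reading
  # is inferred from the key names and the matching token counts.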
  fusion_head_layers: 2
  max_frame_pos: 96
  fusion_header_type: "seqTransf"
  max_txt_len: 2048
  # for llama_2_chat:
  end_sym: "</s>"
  prompt_path: ""
  prompt_template: '[INST] <<SYS>>\n \n<</SYS>>\n\n{} [/INST] '
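  # Standard llama-2-chat template with an empty <<SYS>> system prompt; {} is
  # presumably filled with the video tokens plus the user instruction, and
  # targets are terminated with end_sym "</s>".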
  use_grad_checkpoint: True
  lora: True
  lora_inference_mode: False
  qformer_text_input: False
  window_size: 32
  stride: 32
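  # window_size == stride == 32 suggests the 96 frames are processed in three
  # non-overlapping windows; the exact windowing is defined in the model code.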
  # use real time stamp
  real_time_stamp: True
  max_time_pos: 8192
  # use time special token
  special_time_token: True
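  # Temporal encoding: real (absolute) timestamps with positions up to
  # max_time_pos, plus dedicated special tokens for time values in the text.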

datasets:
  time_instruct:
    data_type: video
    build_info:
      anno_dir: "data/VTG-IT/vtg-it/vtg-it-120k.json"
      videos_dir: "/group/40065/data"
    vis_processor:
      train:
        name: "alpro_video_train"
        n_frms: 96
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    num_video_query_token: 32
    tokenizer_name: "model/Video-LLaMA-2-7B-Finetuned/llama-2-7b-chat-hf/"
    model_type: "llama_v2"
    num_frm: 96
    sample_type: 'rand'
    max_txt_len: 2048
    stride: 32
    special_time_token: True
    strict_video_token_num: 256
    visual_model_type: "eva_clip"
    use_msg: True
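  # Valley-72k instruction data; all processing settings mirror time_instruct.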
  valley72k_instruct:
    data_type: video
    build_info:
      anno_dir: "data/VTG-IT/valley/time_token.json"
      videos_dir: "/group/40065/data"
    vis_processor:
      train:
        name: "alpro_video_train"
        n_frms: 96
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    num_video_query_token: 32
    tokenizer_name: "model/Video-LLaMA-2-7B-Finetuned/llama-2-7b-chat-hf/"
    model_type: "llama_v2"
    num_frm: 96
    sample_type: 'rand'
    max_txt_len: 2048
    stride: 32
    special_time_token: True
    strict_video_token_num: 256
    visual_model_type: "eva_clip"
    use_msg: True

run:
  task: video_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 3e-5
  min_lr: 1e-5
  warmup_lr: 1e-6
  weight_decay: 0.05
  max_epoch: 10
  iters_per_epoch: 13562 # (104k+72k)/16
  batch_size_train: 1
  batch_size_eval: 4
  num_workers: 6
  warmup_steps: 8137 # iters_per_epoch*0.6
  accum_grad_iters: 4
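  # Effective batch per optimizer step: batch_size_train (1) * accum_grad_iters
  # (4) * n_gpus. The /16 divisor in iters_per_epoch is consistent with a 4-GPU
  # run (1 * 4 * 4 = 16), and warmup_steps is 0.6 of one epoch (13562 * 0.6 = 8137).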
  seed: 42
  output_dir: "ckpt/ours/slot_videollama_fm96_fmt"
  amp: True
  resume_ckpt_path: null
  evaluate: False
  train_splits: ["train"]
  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
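# dist_url "env://" means rank and world size are read from environment
# variables, so world_size: 1 above is a single-process default that the
# launcher overrides. Assuming the LAVIS-style entry point used by
# Video-LLaMA derivatives, a multi-GPU run would look roughly like:
#   torchrun --nproc_per_node=4 train.py --cfg-path pretrain-slot-sample-fmt-96.yaml
# (the script name and --cfg-path flag are assumptions; check the repo README).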