# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Xueyan Zou ([email protected])
# --------------------------------------------------------
import os
import sys
import logging
import random

import numpy as np
import torch
# import wandb

from utilities.arguments import load_opt_command

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# def init_wandb(args, job_dir, entity='YOUR_USER_NAME', project='YOUR_PROJECT_NAME', job_name='tmp'):
#     wandb_dir = os.path.join(job_dir, 'wandb')
#     os.makedirs(wandb_dir, exist_ok=True)
#     runid = None
#     if os.path.exists(f"{wandb_dir}/runid.txt"):
#         runid = open(f"{wandb_dir}/runid.txt").read()
#     wandb.init(project=project,
#                name=job_name,
#                dir=wandb_dir,
#                entity=entity,
#                resume="allow",
#                id=runid,
#                config={"hierarchical": True},)
#     open(f"{wandb_dir}/runid.txt", 'w').write(wandb.run.id)
#     wandb.config.update({k: args[k] for k in args if k not in wandb.config})
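# Note: wandb logging is currently disabled. To re-enable it, uncomment
# `# import wandb` above, the init_wandb() helper, and the wandb.login()/
# init_wandb() calls inside main(), and export a valid WANDB_KEY in the
# environment (main() reads os.environ['WANDB_KEY'] and opt['WANDB']).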

def set_seed(seed: int = 42) -> None:
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # When running on the CuDNN backend, two further options must be set for
    # reproducibility (this trades some throughput for determinism).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set a fixed value for the hash seed
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f"Random seed set as {seed}")

def main(args=None):
    '''
    Main function for the entry point.
    1. Set environment variables for distributed training.
    2. Load the config file and set up the trainer.
    '''
    opt, cmdline_args = load_opt_command(args)
    command = cmdline_args.command

    if cmdline_args.user_dir:
        absolute_user_dir = os.path.abspath(cmdline_args.user_dir)
        opt['base_path'] = absolute_user_dir

    # update_opt(opt, command)
    world_size = 1
    if 'OMPI_COMM_WORLD_SIZE' in os.environ:
        # Launched under MPI (e.g. mpirun); pick up the world size it provides.
        world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])

    if opt['TRAINER'] == 'xdecoder':
        from trainer import XDecoder_Trainer as Trainer
    else:
        assert False, "The trainer type: {} is not defined!".format(opt['TRAINER'])

    set_seed(opt['RANDOM_SEED'])
    trainer = Trainer(opt)
    os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'DETAIL'

    if command == "train":
        # if opt['rank'] == 0 and opt['WANDB']:
        #     wandb.login(key=os.environ['WANDB_KEY'])
        #     init_wandb(opt, trainer.save_folder, job_name=trainer.save_folder)
        trainer.train()
    elif command == "evaluate":
        trainer.eval()
    else:
        raise ValueError(f"Unknown command: {command}")

if __name__ == "__main__":
    main()
    sys.exit(0)
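
# Usage sketch: the positional `command` ("train" or "evaluate") is handled
# above; the config flag and file paths below are assumptions about
# utilities/arguments.load_opt_command and the repository layout, not verified
# here.
#   python entry.py train --conf_files configs/xdecoder/YOUR_CONFIG.yaml
#   python entry.py evaluate --conf_files configs/xdecoder/YOUR_CONFIG.yaml
# Multi-process runs can be launched with mpirun, which sets
# OMPI_COMM_WORLD_SIZE as read in main():
#   mpirun -n 8 python entry.py train --conf_files configs/xdecoder/YOUR_CONFIG.yaml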