dueling_dqn.py
import argparse

import gym
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
from nnabla.ext_utils import get_extension_context

from common.buffer import ReplayBuffer
from common.env import AtariWrapper
from common.experiment import evaluate, train
from common.exploration import LinearlyDecayEpsilonGreedy
from common.log import prepare_monitor
from common.network import nature_head
from dqn import DQN, update
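# NOTE: the `common.*` helpers and `dqn` are local modules shipped with this
# repository (replay buffer, logging, training/evaluation loops, exploration
# schedule, Atari wrappers, network head, and the base DQN implementation);
# they are not pip-installable packages.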

def q_function(obs, num_actions, scope):
    with nn.parameter_scope(scope):
        out = nature_head(obs)
        advantages = PF.affine(out, num_actions, name='advantage')
        value = PF.affine(out, 1, name='value')
        baseline = F.mean(advantages, axis=1, keepdims=True)
        return value + advantages - baseline
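
# The head above implements the dueling decomposition of Wang et al. (2016):
#     Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a')
# Subtracting the mean advantage keeps the value and advantage streams
# identifiable. A minimal shape sanity check (a sketch, assuming nnabla is
# installed and that nature_head takes an NCHW image variable as in
# common/network.py):
#
#     obs = nn.Variable((1, 4, 84, 84))   # one stack of four 84x84 frames
#     q = q_function(obs, num_actions=4, scope='sanity')
#     print(q.shape)                       # -> (1, 4), one Q-value per action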

def main(args):
    if args.gpu:
        ctx = get_extension_context('cudnn', device_id=str(args.device))
        nn.set_default_context(ctx)

    # Atari environments: an episodic one for training and a non-episodic one
    # with a fixed seed for evaluation.
    env = AtariWrapper(gym.make(args.env), args.seed, episodic=True)
    eval_env = AtariWrapper(gym.make(args.env), 50, episodic=False)
    num_actions = env.action_space.n

    # action-value function approximated by the dueling network defined above
    model = DQN(q_function, num_actions, args.batch_size, args.gamma, args.lr)
    if args.load is not None:
        nn.load_parameters(args.load)
    model.update_target()

    buffer = ReplayBuffer(args.buffer_size, args.batch_size)
    exploration = LinearlyDecayEpsilonGreedy(num_actions, args.epsilon, 0.1,
                                             args.schedule_duration)
    monitor = prepare_monitor(args.logdir)

    update_fn = update(model, buffer, args.target_update_interval)
    eval_fn = evaluate(eval_env, model, render=args.render)
    train(env, model, buffer, exploration, monitor, update_fn, eval_fn,
          args.final_step, args.update_start, args.update_interval,
          args.save_interval, args.evaluate_interval, ['loss'])

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--lr', type=float, default=2.5e-4)
    parser.add_argument('--buffer-size', type=int, default=10 ** 5)
    parser.add_argument('--epsilon', type=float, default=1.0)
    parser.add_argument('--schedule-duration', type=int, default=10 ** 6)
    parser.add_argument('--final-step', type=int, default=10 ** 7)
    parser.add_argument('--target-update-interval', type=int, default=10 ** 4)
    parser.add_argument('--update-start', type=int, default=5 * 10 ** 4)
    parser.add_argument('--update-interval', type=int, default=4)
    parser.add_argument('--evaluate-interval', type=int, default=10 ** 6)
    parser.add_argument('--save-interval', type=int, default=10 ** 6)
    parser.add_argument('--logdir', type=str, default='dueling_dqn')
    parser.add_argument('--load', type=str)
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--gpu', action='store_true')
    parser.add_argument('--render', action='store_true')
    args = parser.parse_args()
    main(args)
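
# Example invocation (a sketch; assumes a gym installation with the Atari ROMs
# available and, when --gpu is passed, an nnabla-ext-cuda package providing
# the 'cudnn' extension context):
#
#     python dueling_dqn.py --gpu --device 0 --logdir dueling_dqn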