forked from johnjim0816/joyrl-offline
-
Notifications
You must be signed in to change notification settings - Fork 0
/
trainer.py
40 lines (40 loc) · 1.94 KB
/
trainer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
class Trainer:
    """Runs single training and evaluation episodes for an agent in a
    Gym-style environment, supporting both the old 4-tuple and the new
    5-tuple ``env.step`` APIs (selected via ``cfg.new_step_api``)."""

    def __init__(self) -> None:
        pass

    def train_one_episode(self, env, agent, cfg):
        """Run one training episode and update the agent at every step.

        Args:
            env: Gym-style environment exposing ``reset(seed=...)`` and ``step``.
            agent: agent exposing ``sample_action(state)`` and
                ``update(state, action, reward, next_state, terminated)``.
            cfg: config with ``seed``, ``max_steps`` and ``new_step_api`` attributes.

        Returns:
            ``(agent, res)`` where ``res = {'ep_reward': ..., 'ep_step': ...}``.
        """
        ep_reward = 0  # cumulative reward for this episode
        ep_step = 0  # number of environment steps taken
        state = env.reset(seed = cfg.seed)  # reset and obtain initial state
        for _ in range(cfg.max_steps):
            ep_step += 1
            action = agent.sample_action(state)  # sample action from behavior policy
            if cfg.new_step_api:
                # New OpenAI Gym step API: (obs, reward, terminated, truncated, info)
                next_state, reward, terminated, truncated, info = env.step(action)
            else:
                # Old step API: (obs, reward, done, info) — no separate truncation flag
                next_state, reward, terminated, info = env.step(action)
                truncated = False  # FIX: was undefined on this branch
            # Pass only `terminated` (not truncation) so the agent may still
            # bootstrap from the value of a time-limit-truncated state.
            agent.update(state, action, reward, next_state, terminated)  # update agent
            state = next_state  # advance env state
            ep_reward += reward
            # BUG FIX: also stop on truncation under the new step API;
            # previously truncated episodes kept running until cfg.max_steps.
            if terminated or truncated:
                break
        res = {'ep_reward':ep_reward,'ep_step':ep_step}
        return agent,res

    def test_one_episode(self, env, agent, cfg):
        """Run one evaluation episode with greedy actions and no learning.

        Args:
            env: Gym-style environment exposing ``reset(seed=...)`` and ``step``.
            agent: agent exposing ``predict_action(state)``.
            cfg: config with ``seed``, ``max_steps``, ``new_step_api`` and
                ``render`` attributes.

        Returns:
            ``(agent, res)`` where ``res = {'ep_reward': ..., 'ep_step': ...}``.
        """
        ep_reward = 0  # cumulative reward for this episode
        ep_step = 0  # number of environment steps taken
        state = env.reset(seed = cfg.seed)  # reset and obtain initial state
        for _ in range(cfg.max_steps):
            if cfg.render:
                env.render()
            ep_step += 1
            action = agent.predict_action(state)  # greedy/deterministic action
            if cfg.new_step_api:
                # New OpenAI Gym step API: (obs, reward, terminated, truncated, info)
                next_state, reward, terminated, truncated, info = env.step(action)
            else:
                # Old step API: (obs, reward, done, info) — no separate truncation flag
                next_state, reward, terminated, info = env.step(action)
                truncated = False  # FIX: was undefined on this branch
            state = next_state  # advance env state
            ep_reward += reward
            # BUG FIX: also stop on truncation under the new step API.
            if terminated or truncated:
                break
        res = {'ep_reward':ep_reward,'ep_step':ep_step}
        return agent,res