envs.py (126 lines, 97 loc, 4.2 KB) · forked from atgambardella/pytorch-es
# Taken from https://github.com/ikostrikov/pytorch-a3c
from __future__ import absolute_import, division, print_function
import numpy as np
import gym
from gym.spaces.box import Box
from universe import vectorized
from universe.wrappers import Unvectorize, Vectorize
import cv2

# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id, frame_stack_size=1, noop_init=0, image_dim=42):
    gym.logger.setLevel(gym.logging.WARN)
    env = gym.make(env_id)
    if len(env.observation_space.shape) > 1:
        # print('Preprocessing env')
        env = Vectorize(env)
        env = AtariRescale(env, dim=image_dim)
        env = NormalizedEnv(env)
        if frame_stack_size > 1:
            env = Stack(env, frame_stack_size=frame_stack_size)
        if noop_init > 0:
            env = NoopInit(env, max_n_noops=noop_init)
        env = Unvectorize(env)
    else:
        print('No preprocessing because env is too small')
    return env

def _process_frame(frame, dim=42):
    frame = frame[34:34 + 160, :160]
    # Resize by half, then down to 42x42 (essentially mipmapping). If
    # we resize directly we lose pixels that, when mapped to 42x42,
    # aren't close enough to the pixel boundary.
    if dim < 80:
        frame = cv2.resize(frame, (80, 80))
    frame = cv2.resize(frame, (dim, dim))
    frame = frame.mean(2)
    frame = frame.astype(np.float32)
    frame *= (1.0 / 255.0)
    frame = np.reshape(frame, [1, dim, dim])
    return frame
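
# Illustration (not part of the original file): a raw Atari RGB frame of
# shape (210, 160, 3) is cropped, downscaled, grayscaled, and rescaled to
# [0, 1], ending up as a (1, dim, dim) float32 array. A rough sketch:
#
#   raw = np.zeros((210, 160, 3), dtype=np.uint8)
#   small = _process_frame(raw)            # shape (1, 42, 42) by default
#   large = _process_frame(raw, dim=80)    # skips the intermediate resize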

class AtariRescale(vectorized.ObservationWrapper):

    def __init__(self, env=None, dim=42):
        super(AtariRescale, self).__init__(env)
        self.observation_space = Box(0.0, 1.0, [1, dim, dim])
        self.dim = dim

    def _observation(self, observation_n):
        return [_process_frame(observation, dim=self.dim)
                for observation in observation_n]

class NormalizedEnv(vectorized.ObservationWrapper):
    """Normalizes observations with running (exponentially averaged) statistics."""

    def __init__(self, env=None):
        super(NormalizedEnv, self).__init__(env)
        self.state_mean = 0
        self.state_std = 0
        self.alpha = 0.9999
        # Counts observations seen so far; used for bias correction below.
        self.max_episode_length = 0

    def _observation(self, observation_n):
        for observation in observation_n:
            self.max_episode_length += 1
            # Exponential moving averages of the per-frame mean and std.
            self.state_mean = self.state_mean * self.alpha + \
                observation.mean() * (1 - self.alpha)
            self.state_std = self.state_std * self.alpha + \
                observation.std() * (1 - self.alpha)
        # Correct the zero-initialization bias of the moving averages.
        denom = (1 - pow(self.alpha, self.max_episode_length))
        unbiased_mean = self.state_mean / denom
        unbiased_std = self.state_std / denom
        return [(observation - unbiased_mean) / (unbiased_std + 1e-8)
                for observation in observation_n]

class Stack(vectorized.ObservationWrapper):
    """Stacks the last `frame_stack_size` observations along the channel axis."""

    def __init__(self, env=None, frame_stack_size=1):
        super(Stack, self).__init__(env)
        self.frame_stack_size = frame_stack_size
        self.previous_stack = None
        high, low = env.observation_space.high, env.observation_space.low
        high = high.repeat(frame_stack_size, axis=0)
        low = low.repeat(frame_stack_size, axis=0)
        self.observation_space = Box(low, high)

    def _reset(self):
        self.previous_stack = None
        return super(Stack, self)._reset()

    def _observation(self, observation_n):
        if self.previous_stack is None:
            # First frame after a reset: fill the stack with copies of it.
            self.previous_stack = observation_n * self.frame_stack_size
        self.previous_stack = (self.previous_stack + observation_n)[-self.frame_stack_size:]
        return [np.vstack(self.previous_stack)]

class NoopInit(vectorized.ObservationWrapper):
    """Takes a random number of no-op actions after each reset."""

    def __init__(self, env=None, max_n_noops=30):
        super(NoopInit, self).__init__(env)
        self.max_n_noops = max_n_noops
        self.rng = np.random.RandomState()

    def _seed(self, seed=None):
        parent_seeds = super(NoopInit, self)._seed(seed)
        self.rng.seed(parent_seeds[0])
        return parent_seeds

    def _reset(self):
        state = super(NoopInit, self)._reset()
        for _ in range(self.rng.randint(self.max_n_noops + 1)):
            state, _, _, _ = self.step([0])
        return state

    def _observation(self, observation_n):
        return observation_n
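

# A minimal usage sketch (not part of the original file). It assumes the
# old gym + universe stack this module was written against is installed;
# the env id below is an assumption and depends on the installed gym
# version (newer versions use 'PongDeterministic-v4', for example).
if __name__ == '__main__':
    env = create_atari_env('PongDeterministic-v3', frame_stack_size=4,
                           noop_init=30, image_dim=42)
    obs = env.reset()
    # With frame stacking, observations are (frame_stack_size, 42, 42).
    print(obs.shape)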