noisy_mlp.py
import math
from typing import Optional

import torch
from torch import nn
import torch.nn.functional as F

from agents.models.components import BaseComponent


class NoisyMLP(BaseComponent):
    """Helper module for creating stacks of noisy fully connected layers."""

    def __init__(
            self,
            layer_sizes: tuple,
            activation_function: torch.nn.Module = nn.ReLU(True),
            output_function: Optional[torch.nn.Module] = None,
            dropout: Optional[float] = None,
            batch_norm: bool = False,
    ):
        super().__init__()
        layers = torch.nn.ModuleList([NoisyLinear(layer_sizes[0], layer_sizes[1])])
        previous_output = layer_sizes[1]
        for n_out in layer_sizes[2:]:
            layers.append(activation_function)
            if dropout:
                layers.append(nn.Dropout(dropout))
            if batch_norm:
                layers.append(nn.BatchNorm1d(previous_output))
            layers.append(NoisyLinear(previous_output, n_out))
            previous_output = n_out
        if output_function:
            layers.append(output_function)
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        return self.model(x)

    def reset_noise(self):
        """Resample the factorized noise in every NoisyLinear layer."""
        for module in self.model.modules():
            if hasattr(module, 'reset_noise'):
                module.reset_noise()
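
# A minimal construction sketch (illustrative sizes only; the dropout and
# batch-norm arguments below are example values, not project defaults):
#
#     mlp = NoisyMLP(layer_sizes=(8, 64, 64, 4), dropout=0.1, batch_norm=True)
#     q_values = mlp(torch.randn(32, 8))   # -> shape (32, 4)
#     mlp.reset_noise()                    # draw fresh noise before the next rollout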


class NoisyLinear(nn.Module):
    """Noisy linear layer with factorized Gaussian noise.

    Adapted from https://github.com/higgsfield/RL-Adventure/blob/master/5.noisy%20dqn.ipynb
    """

    def __init__(self, in_features, out_features, std_init=0.4):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        # Learnable means and standard deviations; the epsilon tensors are
        # registered as buffers so they follow the module across devices
        # without being treated as trainable parameters.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))
        self.reset_parameters()
        self.reset_noise()
    def forward(self, x):
        if self.training:
            # Training: perturb the parameter means with the current noise sample.
            weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
            bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        else:
            # Evaluation: use the noise-free parameter means.
            weight = self.weight_mu
            bias = self.bias_mu
        return F.linear(x, weight, bias)
    def reset_parameters(self):
        mu_range = 1 / math.sqrt(self.weight_mu.size(1))
        self.weight_mu.data.uniform_(-mu_range, mu_range)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))
        self.bias_mu.data.uniform_(-mu_range, mu_range)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))
    def reset_noise(self):
        epsilon_in = self._scale_noise(self.in_features)
        epsilon_out = self._scale_noise(self.out_features)
        # Factorized noise: the weight noise is the outer product of the
        # per-output and per-input noise vectors.
        self.weight_epsilon.copy_(torch.outer(epsilon_out, epsilon_in))
        self.bias_epsilon.copy_(self._scale_noise(self.out_features))

    def _scale_noise(self, size):
        # f(x) = sign(x) * sqrt(|x|), the scaling used for factorized noise.
        x = torch.randn(size)
        return x.sign().mul(x.abs().sqrt())
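

# ---------------------------------------------------------------------------
# Minimal smoke test (a sketch, not part of the module's API): checks that
# training-mode outputs change when the noise is resampled, while eval-mode
# outputs are deterministic. Sizes are arbitrary example values, and running
# this file directly assumes the `agents` package is importable.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    layer = NoisyLinear(in_features=8, out_features=4)
    x = torch.randn(2, 8)

    layer.train()
    y1 = layer(x)
    layer.reset_noise()          # resample epsilon; the training output changes
    y2 = layer(x)
    assert not torch.allclose(y1, y2)

    layer.eval()                 # noise is ignored at evaluation time
    assert torch.allclose(layer(x), layer(x))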