"""MLP and CNN building blocks for a neural relational model (base_nrl.py)."""
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import tensor
class MLP(nn.Module):
    """Two-layer MLP with ELU activations, dropout, and batch normalization.

    Maps a tensor of shape (batch, num_items, n_input) to
    (batch, num_items, n_out).

    Args:
        n_input: size of each input feature vector.
        n_hidden: size of the hidden layer.
        n_out: size of each output feature vector.
        prob: dropout probability applied after the first layer.
    """

    def __init__(self, n_input, n_hidden, n_out, prob=0.0):
        # NOTE(review): the original class body had no `def __init__` line and
        # subclassed the non-existent `nn.module`; signature reconstructed
        # from the names the body references.
        super(MLP, self).__init__()
        self.f1 = nn.Linear(n_input, n_hidden)
        # Bug fix: second layer must project to n_out so its output matches
        # BatchNorm1d(n_out) below (original kept n_hidden -> n_hidden).
        self.f2 = nn.Linear(n_hidden, n_out)
        self.bn = nn.BatchNorm1d(n_out)
        self.dropout_prob = prob
        self.init_weights()

    def init_weights(self):
        """Xavier-init linear layers; unit-gain, zero-shift batch norms."""
        # Bug fix: original called the non-existent `self.base_nrl()`;
        # nn.Module exposes submodules via self.modules().
        for i in self.modules():
            if isinstance(i, nn.Linear):
                # In-place variants: `xavier_normal` / `fill` do not exist or
                # are deprecated; the trailing-underscore forms are correct.
                nn.init.xavier_normal_(i.weight.data)
                i.bias.data.fill_(0.1)
            elif isinstance(i, nn.BatchNorm1d):
                i.weight.data.fill_(1)
                i.bias.data.zero_()

    def batch_norm(self, inputs):
        """Apply self.bn over (batch, num_items, feat) by flattening the
        first two dims, then restore the original 3-D shape."""
        x = inputs.view(inputs.size(0) * inputs.size(1), -1)
        x = self.bn(x)
        # Bug fix: un-flatten back to 3-D (original returned the still-
        # flattened (batch*num_items, feat) view).
        return x.view(inputs.size(0), inputs.size(1), -1)

    def forward(self, inputs):
        """Run f1 -> dropout -> f2 -> batch norm; preserves the leading
        (batch, num_items) dims."""
        x = F.elu(self.f1(inputs))
        # Bug fix: gate on self.training so eval() actually disables dropout.
        x = F.dropout(x, self.dropout_prob, training=self.training)
        x = F.elu(self.f2(x))
        # Bug fix: BatchNorm1d on a 3-D tensor would normalize the wrong
        # axis; use the flatten/normalize/unflatten helper instead.
        return self.batch_norm(x)
class CNN(nn.Module):
    """1-D convolutional feature extractor.

    Two width-5 conv + batch-norm stages followed by a 1x1 convolution
    projecting to n_out channels.

    Args:
        n_inputs: number of input channels.
        n_hidden: number of channels in the hidden conv layers.
        n_out: number of output channels of the final 1x1 conv.
        dropout_prob: dropout probability (stored; no forward is defined
            in this file, so it is unused here).
    """

    def __init__(self, n_inputs, n_hidden, n_out, dropout_prob=0.0):
        super(CNN, self).__init__()
        # Bug fix: original read the undefined name `n_input`; the
        # constructor parameter is `n_inputs`.
        self.conv1 = nn.Conv1d(n_inputs, n_hidden, kernel_size=5, stride=1, padding=0)
        self.batch1 = nn.BatchNorm1d(n_hidden)
        self.conv2 = nn.Conv1d(n_hidden, n_hidden, kernel_size=5, stride=1, padding=0)
        self.batch2 = nn.BatchNorm1d(n_hidden)
        self.conv_final = nn.Conv1d(n_hidden, n_out, kernel_size=1)
        self.dropout = dropout_prob
        self.init_weights()

    def init_weights(self):
        """He-style init for convs (std = sqrt(2 / fan)); unit-gain,
        zero-shift batch norms."""
        # Bug fix: original called the non-existent `self.base_nrl()`;
        # nn.Module exposes submodules via self.modules().
        for i in self.modules():
            if isinstance(i, nn.Conv1d):
                # fan used for the He scaling factor.
                j = i.kernel_size[0] * i.out_channels
                i.weight.data.normal_(0.0, math.sqrt(2.0 / j))
                i.bias.data.fill_(0.0)
            elif isinstance(i, nn.BatchNorm1d):
                i.weight.data.fill_(1)
                i.bias.data.zero_()