-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLearning1.lua
More file actions
91 lines (70 loc) · 1.83 KB
/
Learning1.lua
File metadata and controls
91 lines (70 loc) · 1.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
--- конец
--- GPU---------------------------------------------------------
require 'torch'
require 'nn'
require 'csvigo'
require 'nnx'
require 'optim'
require 'cutorch'
require 'cunn'
-- Load the training spectra from a semicolon-separated CSV into a tensor.
trainPath = '/home/dmitry/nn/Spectr_MFNN.csv'
trainData= csvigo.load({path= trainPath, mode='raw', separator=';' })
-- testData = torch.load(testPath,'ascii')
csv_tensor = torch.Tensor(trainData)
-- Columns 2..1667 are the 1666 input features for each of the 30 samples.
input= csv_tensor:sub(1,30,2,1667)
input = input:cuda()
-- Column 1 is the regression target (a 30x1 tensor view).
output2 = csv_tensor:sub(1,30,1,1)
-- Auxiliary table of per-sample values.
-- NOTE(review): output2[i] is a 1-element tensor view, not a number, so each
-- table entry holds tensors — confirm this is intended by downstream users.
output={};
for i=1,30 do -- FIX: the 'do' keyword was missing here (Lua syntax error)
output[i]={};
output[i][1]=output2[i];
output[i][2]=output2[i]*i/10;
end
-- Move the target tensor to the GPU (the table above keeps CPU views).
output2=output2:cuda()
-- Define the MLP regressor: 1666 -> 512 -> 256 -> 128 -> 1,
-- with Tanh nonlinearities between the hidden layers.
mlp = nn.Sequential()
mlp:add(nn.Reshape(1666))
local layer_sizes = {1666, 512, 256, 128, 1}
for i = 1, #layer_sizes - 1 do
  mlp:add(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
  if i < #layer_sizes - 1 then
    mlp:add(nn.Tanh()) -- no activation after the final (output) layer
  end
end
mlp:cuda()
-- Mean-squared-error criterion for regression, also moved to the GPU.
loss = nn.MSECriterion()
loss:cuda()
-- Alternative classification setup, kept for reference:
--mlp:add(nn.LogSoftMax())
--loss = nn.ClassNLLCriterion()
-- Configuring optimizer
--local optim_state = {
-- learningRate = 0.01,
-- momentum = 0.6,--0.1,
-- weightDecay = 0.0005--1e-5
--}
--lbfgs
-- Hyper-parameters for optim.lbfgs (see the torch/optim documentation).
optim_state_lbfgs = {
learningRate = 0.5,
maxIter = 60,
nCorrection = 20
}
-- Flatten all network parameters and gradients into two vectors that share
-- storage with the model; optim updates 'w' in place and mlp sees the change.
w,dE_dw = mlp:getParameters()
--dE_dw:cuda()
print("Starting gradient descent from 'optim' on GPU...")
-- Closure for optim.lbfgs: evaluates the loss and its gradient at 'w'.
-- 'w' shares storage with mlp's parameters, so no explicit copy is needed.
-- Returns: E (scalar loss), dE_dw (flattened gradient vector).
function cuda_eval(w)
dE_dw:zero() -- reset accumulated gradients before the backward pass
local Y = mlp:forward(input)
-- FIX: the criterion target must be a GPU tensor matching Y's 30x1 shape.
-- The original passed the plain Lua table 'output' (two entries per row,
-- holding tensor views), which MSECriterion cannot consume.
local E = loss:forward(Y,output2)
local dE_dy = loss:backward(Y,output2)
mlp:backward(input,dE_dy)
return E, dE_dw
end
mlp:reset() -- re-initialize the network weights before training
timer = torch.Timer();
_,fw = optim.lbfgs(cuda_eval, w, optim_state_lbfgs);
cutorch.synchronize() -- wait for pending GPU work so the timing is accurate
-- FIX: the timer measures the whole L-BFGS run, not a per-iteration average,
-- so the message now reports total time.
print(string.format('Success! Total training time was %f s', timer:time().real))
print('Success!')
-- Show predictions next to targets, column-wise (30x2).
-- FIX: 'output' was a Lua table, which torch.cat cannot concatenate with a
-- tensor; use the 30x1 target tensor, and print the otherwise-discarded result.
print(torch.cat(mlp:forward(input), output2, 2))
-- FIX: a bare '#fw' expression is a Lua syntax error; print the number of
-- function evaluations recorded by optim.lbfgs instead.
print(#fw)