Skip to content

Commit 3691620

Browse files
committed
Initial Commit
0 parents  commit 3691620

File tree

6 files changed

+1452
-0
lines changed

6 files changed

+1452
-0
lines changed

Covariances.py

Lines changed: 220 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,220 @@
1+
# -*- coding: utf-8 -*-
2+
from __future__ import division
3+
import numpy as np
4+
import PSim
5+
import pickle
6+
import csv
7+
import simplex
8+
9+
fit_type = 'global'
10+
scale = 0.003
11+
12+
with open("pickled_data.p", "r") as file:
13+
pickled_data = pickle.load(file)
14+
15+
powers = pickled_data['powers']
16+
xdata = pickled_data['allxdata']
17+
ydata = pickled_data['allydata']
18+
xarray = pickled_data['xarray']
19+
yarrays = pickled_data['yarrays']
20+
averages = pickled_data['averages']
21+
period = 50 # ns
22+
23+
with open("pickled_data_250.p", "r") as file:
24+
pickled_data_250 = pickle.load(file)
25+
26+
powers_250 = pickled_data_250['powers']
27+
xdata_250 = pickled_data_250['allxdata']
28+
ydata_250 = pickled_data_250['allydata']
29+
xarray_250 = pickled_data_250['xarray']
30+
yarrays_250 = pickled_data_250['yarrays']
31+
averages_250 = pickled_data_250['averages']
32+
period_250 = 1.0 / 250000.0 / 1e-9 # ns
33+
34+
35+
def scalar_min(p, data):
    """Fitness of a trial overall scalar applied to both simulations.

    Computes the mean squared log-residual between the scaled simulated
    signals and the measured data for the 20 MHz and 250 kHz data sets,
    then returns 100 times the geometric mean of the two as the fitness
    to minimize.

    p    -- 1-element sequence; p[0] is the trial scalar.
    data -- [[xdata, ydata, ysim], [xdata_250, ydata_250, ysim_250]],
            each y* a 2-D array of runs (rows) over time points (cols).
    """
    xdata, ydata, ysim = data[0]
    xdata_250, ydata_250, ysim_250 = data[1]
    scaled_ysim = ysim * p[0]
    scaled_ysim_250 = ysim_250 * p[0]
    err_20 = 0
    err_250 = 0
    num_points = 0
    for dat, sim in zip(ydata, scaled_ysim):
        for x, d, s in zip(xdata, dat, sim):
            # was a bare `except:` -- that also swallowed
            # KeyboardInterrupt/SystemExit; Exception is the widest
            # net that is safe here.
            try:
                if s > 0:
                    log_s = np.log(s)
                else:
                    log_s = 0  # avoid log of non-positive simulation value
                log_d = np.log(d)
                error = (log_s - log_d)
                err_20 += error*error
                num_points = num_points + 1
            except Exception:
                # Penalize any point that cannot be evaluated.
                err_20 += 8e20
    err_20 = err_20 / num_points
    num_points = 0
    # The last (highest-power) 250 kHz run is excluded from the fit.
    for dat, sim in zip(ydata_250[:-1], scaled_ysim_250[:-1]):
        for x, d, s in zip(xdata_250, dat, sim):
            try:
                if s > 0:
                    log_s = np.log(s)
                else:
                    log_s = 0
                log_d = np.log(d)
                error = (log_s - log_d)
                # Only fit the time window just after the excitation pulse.
                if x >= -0.25 and x <= 120:
                    err_250 += error*error
                    num_points = num_points + 1
            except Exception:
                err_250 += 8e20
    err_250 = err_250 / num_points
    err = np.sqrt(err_250*err_20)
    if np.isnan(err):
        err = 7e20  # sentinel: NaN error means a broken trial
    fitness = err * 100
    return fitness
80+
81+
82+
def evaluate(p):
    """Simulate the decay for parameter vector p and return its fitness.

    Depending on the module-level fit_type ('global', 20, or 250), runs
    the 20 MHz and/or 250 kHz PSim simulations, interpolates each
    simulated signal onto the experimental time axis, then optimizes a
    single overall scalar with a simplex search and returns the
    resulting fitness (1e30 if the best scalar is negative).
    """
    dummy_x = np.zeros(10)
    dummy_y = np.zeros([10, 10])
    data = [[dummy_x, dummy_y, dummy_y], [dummy_x, dummy_y, dummy_y]]
    # '==' rather than 'is': identity comparison of strings/ints relies
    # on interning, a CPython implementation detail, and can be False
    # even when the values are equal.
    if fit_type == 'global' or fit_type == 20:  # 20 MHz data
        sim = PSim.DecaySim(reprate=20000000, tolerance=0.005, step=5e-12)
        sim.trap = p[0]
        # Rate parameters are per-step quantities.
        sim.EHdecay = p[1] * sim.step
        sim.Etrap = p[2] * sim.step
        sim.FHloss = p[3] * sim.step
        sim.Gdecay = p[4] * sim.step
        sim.G2decay = p[5] * sim.step
        sim.G3decay = p[6] * sim.step
        sim.GHdecay = p[7] * sim.step
        sim.Gescape = p[8] * sim.step
        # NOTE(review): Gform is zeroed out and p[9] also feeds G3loss;
        # looks intentional (G_Form excluded from the fit) -- confirm.
        sim.Gform = p[9] * sim.step * 0
        sim.G3loss = p[9] * sim.step
        sim.scalar = 1
        for power in powers:
            sim.addPower(power)
        sim.runSim()
        interp_signals = []
        for this_run in sim.signal:
            interp_this = np.interp(xarray, sim.xdata, this_run)
            interp_signals.append(interp_this)
        interp_signals = np.array(interp_signals)
        data[0] = [xarray, yarrays, interp_signals]
    if fit_type == 'global' or fit_type == 250:  # 250 kHz data
        sim_250 = PSim.DecaySim(reprate=250000, tolerance=0.005, step=5e-12)
        sim_250.trap = p[0]
        sim_250.EHdecay = p[1] * sim_250.step
        sim_250.Etrap = p[2] * sim_250.step
        sim_250.FHloss = p[3] * sim_250.step
        sim_250.Gdecay = p[4] * sim_250.step
        sim_250.G2decay = p[5] * sim_250.step
        sim_250.G3decay = p[6] * sim_250.step
        sim_250.GHdecay = p[7] * sim_250.step
        sim_250.Gescape = p[8] * sim_250.step
        sim_250.Gform = p[9] * sim_250.step * 0
        sim_250.G3loss = p[9] * sim_250.step
        sim_250.scalar = 1
        for power in powers_250:
            sim_250.addPower(power)
        sim_250.runSim()
        interp_signals_250 = []
        for this_run in sim_250.signal:
            interp_this = np.interp(xarray_250, sim_250.xdata, this_run)
            interp_signals_250.append(interp_this)
        interp_signals_250 = np.array(interp_signals_250)
        data[1] = [xarray_250, yarrays_250, interp_signals_250]
    # Use a simplex minimization to find the best overall scalar.
    scalar0 = np.array([3e-26])
    ranges = scalar0 * 0.1
    s = simplex.Simplex(scalar_min, scalar0, ranges)
    # 'iters' (not 'iter') so the builtin is not shadowed.
    values, fitness, iters = s.minimize(epsilon=0.00001, maxiters=500,
                                        monitor=0, data=data)
    scalar = values[0]
    if scalar < 0:
        # A negative scalar is unphysical; reject with a huge fitness.
        fitness = 1e30
    return fitness
143+
144+
145+
def main():
    """Estimate parameter uncertainties around the logged best fit.

    Reads the best-fit parameter vector from best_<fit_type>.log,
    evaluates the fitness on forward/backward finite-difference
    stencils, assembles the Hessian by central differences, inverts it
    to get the covariance matrix, and writes the per-parameter
    uncertainties to Parameters_<scale>.txt (and pickles intermediates).
    """
    logname = 'best_{}.log'.format(fit_type)
    with open(logname, 'r') as best_file:
        reader = csv.reader(best_file, dialect='excel-tab')
        p0 = []
        # next(reader) (not the Python-2-only reader.next()) and the
        # builtin float (np.float was removed in NumPy >= 1.24).
        for val in next(reader):
            p0.append(float(val))
    dim = 11
    pi = np.ones(dim)
    for i, n in enumerate([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]):
        pi[i] = p0[n]
    ps1 = np.ndarray([dim, dim, dim])
    ps2 = np.ndarray([dim, dim, dim])
    fitness1 = np.ndarray([dim, dim])
    fitness2 = np.ndarray([dim, dim])
    differences = scale*pi
    # ps1[i][j]: parameter vector with +step applied to components i and
    # j; ps2[i][j]: the same with -step.
    for i in range(dim):
        for j in range(dim):
            for k in range(dim):
                val1 = pi[k]
                val2 = pi[k]
                if i == k or j == k:
                    val1 = val1 + differences[k]
                    val2 = val2 - differences[k]
                ps1[i][j][k] = val1
                ps2[i][j][k] = val2
    # Fitness on each stencil point; symmetric, so only i <= j is run.
    for i in range(dim):
        for j in range(i, dim):
            fitness1[i][j] = evaluate(ps1[i][j])
            fitness1[j][i] = fitness1[i][j]
            fitness2[i][j] = evaluate(ps2[i][j])
            fitness2[j][i] = fitness2[i][j]
    error0 = evaluate(pi)
    data = {'fitness1': fitness1,
            'fitness2': fitness2,
            'differences': differences,
            'error0': error0}
    with open("covariance_data_{}.p".format(scale), "wb") as out_file:
        pickle.dump(data, out_file)
    hessian = np.ndarray([dim, dim])
    for i in range(dim):
        for j in range(dim):
            if i == j:
                # Pure second derivative along axis i (central difference).
                d2i = differences[i]
                df1 = (fitness1[i][j] - error0) / d2i
                df2 = (error0 - fitness2[i][j]) / d2i
                hessian[i][j] = (df1 - df2) / (d2i)
            else:
                # Mixed partial: average the forward and backward stencils.
                df1di1 = (fitness1[i][i] - error0) / differences[i]
                df1di2 = (fitness1[i][j] - fitness1[j][j]) / differences[i]
                dff1didj = (df1di2 - df1di1) / differences[j]
                df2di1 = (error0 - fitness2[i][i]) / differences[i]
                df2di2 = (fitness2[j][j] - fitness2[i][j]) / differences[i]
                dff2didj = (df2di2 - df2di1) / differences[j]
                hessian[i][j] = (dff1didj + dff2didj) / 2
                hessian[j][i] = hessian[i][j]
    with open("hessian_{}.p".format(scale), "wb") as out_file:
        pickle.dump(hessian, out_file)
    # np.matrix is deprecated; np.linalg.inv works on plain arrays.
    covariance = np.linalg.inv(hessian)
    cv_array = np.array(covariance)
    # 11 names to match dim == 11: the original 10-entry list raised
    # IndexError at i == 10 ('G_Form' was missing).
    parameters = ['Traps', 'EH_Decay', 'E_Trap', 'TH_loss', 'G_Decay',
                  'G2_Decay', 'G3_Decay', 'GH_Decay', 'G_Escape',
                  'G_Form', 'G3_Loss']
    for i in range(dim):
        print('{}{}: {} +- {}'.format(' ' * (8-len(parameters[i])),
                                      parameters[i], p0[i],
                                      np.sqrt(cv_array[i][i])))
    with open('Parameters_{}.txt'.format(scale), 'w') as f:
        writer = csv.writer(f, dialect="excel-tab")
        # range(dim), not range(10): write every parameter row.
        for i in range(dim):
            error = np.sqrt(cv_array[i][i])
            relerror = error / pi[i] * 100
            words = '{}{}: {} +- {} ({}%)'.format(
                ' ' * (8-len(parameters[i])), parameters[i],
                pi[i], error, relerror)
            print(words)
            writer.writerow([words])

Deviations.py

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
# -*- coding: utf-8 -*-
2+
from __future__ import division
3+
import multiprocessing
4+
import multiprocessing.pool
5+
import numpy as np
6+
import PSim
7+
import pickle
8+
import csv
9+
import simplex
10+
from matplotlib import pyplot as plt
11+
12+
# ---------------------------------------------------------------------------
# Deviations: rebuild the Hessian from saved covariance data, print the
# parameter uncertainties, and plot the normalized covariance matrix.
# ---------------------------------------------------------------------------
fit_type = 'global'
logname = 'best_{}.log'.format(fit_type)
with open(logname, 'r') as best_file:
    reader = csv.reader(best_file, dialect='excel-tab')
    p0 = []
    # next(reader) (not the Python-2-only reader.next()) and the builtin
    # float (np.float was removed in NumPy >= 1.24).
    for val in next(reader):
        p0.append(float(val))
p0 = np.array(p0[:-1])  # drop the trailing (scalar) log entry
dim = 10
pi = np.ones(dim)
# Map best-fit entries onto the 10 free parameters; index 9 (G_Form)
# is excluded from the fit.
for i, n in enumerate([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]):
    pi[i] = p0[n]
differences = 0.001*pi  # finite-difference steps used by Covariances.py
# Pickles must be opened in binary mode ("rb"); text mode breaks on
# Python 3.
with open("pickled_data_{}.p".format(fit_type), "rb") as data_file:
    pickled_data = pickle.load(data_file)
# NOTE(review): this second load immediately overwrites the one above,
# so the first appears dead (kept only as an existence check -- confirm).
# The original called .format() on a placeholder-free string (a no-op);
# removed.
with open("covariance_data_0.001.p", "rb") as data_file:
    pickled_data = pickle.load(data_file)
fitness1 = pickled_data['fitness1']
fitness2 = pickled_data['fitness2']
error0 = pickled_data['error0']
hessian = np.ndarray([dim, dim])
for i in range(dim):
    for j in range(dim):
        if i == j:
            # Pure second derivative along axis i (central difference).
            d2i = differences[i]
            df1 = (fitness1[i][j] - error0) / d2i
            df2 = (error0 - fitness2[i][j]) / d2i
            hessian[i][j] = (df1 - df2) / (d2i)
        else:
            # Mixed partial: average the forward and backward stencils.
            df1di1 = (fitness1[i][i] - error0) / differences[i]
            df1di2 = (fitness1[i][j] - fitness1[j][j]) / differences[i]
            dff1didj = (df1di2 - df1di1) / differences[j]
            df2di1 = (error0 - fitness2[i][i]) / differences[i]
            df2di2 = (fitness2[j][j] - fitness2[i][j]) / differences[i]
            dff2didj = (df2di2 - df2di1) / differences[j]
            hessian[i][j] = (dff1didj + dff2didj) / 2
            hessian[j][i] = hessian[i][j]
with open("pickled_hessian_{}.p".format(fit_type), "wb") as out_file:
    pickle.dump(hessian, out_file)
# np.matrix is deprecated; np.linalg.inv works on plain arrays.
covariance = np.linalg.inv(hessian)
cv_array = np.array(covariance)
parameters = ['Traps', 'EH_Decay', 'E_Trap', 'TH_loss', 'G_Decay',
              'G2_Decay', 'G3_Decay', 'GH_Decay', 'G_Escape', 'G_Form',
              'G3_Loss']
with open('Parameters.txt', 'w') as f:
    writer = csv.writer(f, dialect="excel-tab")
    for i in range(10):
        error = np.sqrt(cv_array[i][i])
        relerror = error / p0[i] * 100
        words = '{}{}: {} +- {} ({}%)'.format(' ' * (8-len(parameters[i])),
                                              parameters[i], p0[i], error,
                                              relerror)
        print(words)
        writer.writerow([words])

#%%
# Diagnostic: asymmetry of the forward/backward difference stencils.
errors = np.zeros_like(fitness1)
for i in range(10):
    for j in range(10):
        e1 = fitness1[i][j]-error0
        e2 = error0-fitness2[i][j]
        errors[i][j] = (e1-e2)**2
print("max_diff:{}".format(np.max(errors)))
print(errors[3][3])
print(fitness1[3][3]-error0)
plt.figure()
# Normalize the covariance by the parameter magnitudes.  (The original
# had an if/else here whose two branches were identical; collapsed.)
new_array = np.ones_like(cv_array)
for i in range(10):
    for j in range(10):
        new_array[i][j] = cv_array[i][j] / p0[i] / p0[j]
new_array = np.sqrt(np.abs(new_array))
# Replace rows/cols 4 and 6 and the diagonal with the average of the
# remaining entries so they do not dominate the color scale.
new_array2 = np.delete(new_array, [4, 6], axis=0)
new_array2 = np.delete(new_array2, [4, 6], axis=1)
ave = np.average(new_array2)
for i in range(10):
    for j in range(10):
        if i == 4 or i == 6 or j == 4 or j == 6 or i == j:
            new_array[i][j] = ave
levels = np.linspace(np.min(new_array), np.max(new_array), 50)
plt.contourf(new_array, levels=levels)
plt.colorbar()
plt.grid(which='major')
plt.title("Normalized Covariance")
plt.show()

0 commit comments

Comments
 (0)