# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import PSim
import pickle
import csv
import simplex

fit_type = 'global'  # 'global' fits both datasets; 20 or 250 fits only that rep-rate
scale = 0.003        # relative step size for the finite-difference derivatives

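# This script estimates uncertainties for the best-fit PSim decay-model
# parameters.  It reloads the saved best-fit parameter vector, displaces each
# parameter (and each pair of parameters) by +/- scale * value, re-evaluates
# the fit error at every displaced point, builds a finite-difference Hessian
# of the error surface, and inverts it to obtain a covariance matrix; the
# square roots of the diagonal are reported as the parameter errors.  Two
# datasets are fit: transients taken at 20 MHz and 250 kHz repetition rates.
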
# Experimental transients measured at a 20 MHz excitation repetition rate.
with open("pickled_data.p", "rb") as data_file:
    pickled_data = pickle.load(data_file)

powers = pickled_data['powers']
xdata = pickled_data['allxdata']
ydata = pickled_data['allydata']
xarray = pickled_data['xarray']
yarrays = pickled_data['yarrays']
averages = pickled_data['averages']
period = 50  # ns between pulses at 20 MHz

# Experimental transients measured at a 250 kHz excitation repetition rate.
with open("pickled_data_250.p", "rb") as data_file:
    pickled_data_250 = pickle.load(data_file)

powers_250 = pickled_data_250['powers']
xdata_250 = pickled_data_250['allxdata']
ydata_250 = pickled_data_250['allydata']
xarray_250 = pickled_data_250['xarray']
yarrays_250 = pickled_data_250['yarrays']
averages_250 = pickled_data_250['averages']
period_250 = 1.0 / 250000.0 / 1e-9  # ns between pulses at 250 kHz


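# Goodness-of-fit metric used for the 1-D scalar search below.  For each
# dataset it accumulates squared log-residuals, (log(sim) - log(data))**2,
# averaged over the compared points; the 250 kHz residuals are restricted to
# -0.25 <= x <= 120 (same time units as xarray_250, nominally ns).  The two
# averages are combined as their geometric mean, sqrt(err_20 * err_250), and
# multiplied by 100 to give the fitness.  Points that raise an exception add
# a large penalty, and a NaN result is replaced by a large penalty value.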
def scalar_min(p, data):
    """Return the fitness for a trial overall scaling factor p[0]."""
    xdata, ydata, ysim = data[0]
    xdata_250, ydata_250, ysim_250 = data[1]
    scaled_ysim = ysim * p[0]
    scaled_ysim_250 = ysim_250 * p[0]
    err_20 = 0
    err_250 = 0
    num_points = 0
    for dat, sim in zip(ydata, scaled_ysim):
        for x, d, s in zip(xdata, dat, sim):
            try:
                if s > 0:
                    log_s = np.log(s)
                else:
                    log_s = 0
                log_d = np.log(d)
                error = log_s - log_d
                err_20 += error * error
                num_points += 1
            except Exception:
                # Unusable point: add a large penalty so this scalar is rejected.
                err_20 += 8e20
    err_20 = err_20 / num_points
    num_points = 0
    # The last 250 kHz run is excluded from the error sum.
    for dat, sim in zip(ydata_250[:-1], scaled_ysim_250[:-1]):
        for x, d, s in zip(xdata_250, dat, sim):
            try:
                if s > 0:
                    log_s = np.log(s)
                else:
                    log_s = 0
                log_d = np.log(d)
                error = log_s - log_d
                if -0.25 <= x <= 120:
                    err_250 += error * error
                    num_points += 1
            except Exception:
                err_250 += 8e20
    err_250 = err_250 / num_points
    err = np.sqrt(err_250 * err_20)
    if np.isnan(err):
        err = 7e20
    fitness = err * 100
    return fitness


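# evaluate(p): full model evaluation for one parameter vector.  A summary of
# the code below: a PSim.DecaySim is configured at the appropriate repetition
# rate (20 MHz and/or 250 kHz, depending on fit_type), p[0] sets sim.trap and
# p[1]..p[9] set the rate constants (each multiplied by the simulation step;
# Gform is multiplied by zero, i.e. effectively switched off), the simulated
# signals are interpolated onto the experimental time axes, and a 1-D simplex
# search over the overall intensity scalar returns the best achievable
# fitness.  A negative best scalar is rejected with a huge fitness value.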
def evaluate(p):
    dummy_x = np.zeros(10)
    dummy_y = np.zeros([10, 10])
    data = [[dummy_x, dummy_y, dummy_y], [dummy_x, dummy_y, dummy_y]]
    if fit_type == 'global' or fit_type == 20:  # 20 MHz data
        sim = PSim.DecaySim(reprate=20000000, tolerance=0.005, step=5e-12)
        sim.trap = p[0]
        sim.EHdecay = p[1] * sim.step
        sim.Etrap = p[2] * sim.step
        sim.FHloss = p[3] * sim.step
        sim.Gdecay = p[4] * sim.step
        sim.G2decay = p[5] * sim.step
        sim.G3decay = p[6] * sim.step
        sim.GHdecay = p[7] * sim.step
        sim.Gescape = p[8] * sim.step
        sim.Gform = p[9] * sim.step * 0  # * 0: Gform is effectively disabled here
        sim.G3loss = p[9] * sim.step
        sim.scalar = 1
        for power in powers:
            sim.addPower(power)
        sim.runSim()
        interp_signals = []
        for this_run in sim.signal:
            interp_this = np.interp(xarray, sim.xdata, this_run)
            interp_signals.append(interp_this)
        interp_signals = np.array(interp_signals)
        data[0] = [xarray, yarrays, interp_signals]
    if fit_type == 'global' or fit_type == 250:  # 250 kHz data
        sim_250 = PSim.DecaySim(reprate=250000, tolerance=0.005, step=5e-12)
        sim_250.trap = p[0]
        sim_250.EHdecay = p[1] * sim_250.step
        sim_250.Etrap = p[2] * sim_250.step
        sim_250.FHloss = p[3] * sim_250.step
        sim_250.Gdecay = p[4] * sim_250.step
        sim_250.G2decay = p[5] * sim_250.step
        sim_250.G3decay = p[6] * sim_250.step
        sim_250.GHdecay = p[7] * sim_250.step
        sim_250.Gescape = p[8] * sim_250.step
        sim_250.Gform = p[9] * sim_250.step * 0  # * 0: Gform is effectively disabled here
        sim_250.G3loss = p[9] * sim_250.step
        sim_250.scalar = 1
        for power in powers_250:
            sim_250.addPower(power)
        sim_250.runSim()
        interp_signals_250 = []
        for this_run in sim_250.signal:
            interp_this = np.interp(xarray_250, sim_250.xdata, this_run)
            interp_signals_250.append(interp_this)
        interp_signals_250 = np.array(interp_signals_250)
        data[1] = [xarray_250, yarrays_250, interp_signals_250]
    # Use a simplex minimization to find the best overall scalar.
    scalar0 = np.array([3e-26])
    ranges = scalar0 * 0.1
    s = simplex.Simplex(scalar_min, scalar0, ranges)
    values, fitness, iters = s.minimize(epsilon=0.00001, maxiters=500,
                                        monitor=0, data=data)
    scalar = values[0]
    if scalar < 0:
        # A negative scaling factor is unphysical; reject this parameter set.
        fitness = 1e30
    return fitness


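# main() drives the uncertainty estimate: (1) read the best-fit parameters
# from best_<fit_type>.log, (2) build parameter vectors displaced by the step
# sizes in `differences` along every coordinate and pair of coordinates,
# (3) evaluate the fitness at each displaced point (fitness1 = forward,
# fitness2 = backward) and at the centre (error0), (4) assemble the
# finite-difference Hessian and invert it to get the covariance matrix, and
# (5) report sqrt(diagonal) as the parameter uncertainties.  Intermediate
# results are pickled to covariance_data_<scale>.p and hessian_<scale>.p.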
def main():
    logname = 'best_{}.log'.format(fit_type)
    with open(logname, 'rb') as best_file:
        reader = csv.reader(best_file, dialect='excel-tab')
        p0 = []
        for val in next(reader):
            p0.append(float(val))
    dim = 11
    # Central parameter vector: the first `dim` best-fit values.
    pi = np.array(p0[:dim])
    ps1 = np.ndarray([dim, dim, dim])
    ps2 = np.ndarray([dim, dim, dim])
    fitness1 = np.ndarray([dim, dim])
    fitness2 = np.ndarray([dim, dim])
    # Absolute step used for each parameter's finite difference.
    differences = scale * pi
    # Displaced parameter sets: ps1[i][j] has coordinates i and j increased by
    # their step sizes, ps2[i][j] has them decreased (when i == j only that
    # single coordinate is displaced).
    for i in range(dim):
        for j in range(dim):
            for k in range(dim):
                val1 = pi[k]
                val2 = pi[k]
                if i == k or j == k:
                    val1 = val1 + differences[k]
                    val2 = val2 - differences[k]
                ps1[i][j][k] = val1
                ps2[i][j][k] = val2
    # Evaluate the fitness at every displaced point (symmetric in i and j).
    for i in range(dim):
        for j in range(i, dim):
            fitness1[i][j] = evaluate(ps1[i][j])
            fitness1[j][i] = fitness1[i][j]
            fitness2[i][j] = evaluate(ps2[i][j])
            fitness2[j][i] = fitness2[i][j]
    # Fitness at the unperturbed best-fit point.
    error0 = evaluate(pi)
    data = {'fitness1': fitness1,
            'fitness2': fitness2,
            'differences': differences,
            'error0': error0}
    with open("covariance_data_{}.p".format(scale), "wb") as out_file:
        pickle.dump(data, out_file)
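    # Hessian of the fitness surface by finite differences.  With d_i the step
    # in parameter i and f the fitness, the code below implements
    #   H[i][i] ~= (f(p + d_i) - 2 f(p) + f(p - d_i)) / d_i**2
    #   H[i][j] ~= (f(p + d_i + d_j) - f(p + d_i) - f(p + d_j) + f(p)) / (d_i d_j)
    # where the off-diagonal term is the average of the forward (fitness1) and
    # backward (fitness2) difference estimates.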
    hessian = np.ndarray([dim, dim])
    for i in range(dim):
        for j in range(dim):
            if i == j:
                d2i = differences[i]
                df1 = (fitness1[i][j] - error0) / d2i
                df2 = (error0 - fitness2[i][j]) / d2i
                hessian[i][j] = (df1 - df2) / d2i
            else:
                df1di1 = (fitness1[i][i] - error0) / differences[i]
                df1di2 = (fitness1[i][j] - fitness1[j][j]) / differences[i]
                dff1didj = (df1di2 - df1di1) / differences[j]
                df2di1 = (error0 - fitness2[i][i]) / differences[i]
                df2di2 = (fitness2[j][j] - fitness2[i][j]) / differences[i]
                dff2didj = (df2di2 - df2di1) / differences[j]
                hessian[i][j] = (dff1didj + dff2didj) / 2
                hessian[j][i] = hessian[i][j]
    with open("hessian_{}.p".format(scale), "wb") as out_file:
        pickle.dump(hessian, out_file)
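    # Assuming the usual quadratic approximation of the error surface near the
    # minimum, the inverse Hessian is used as the parameter covariance matrix
    # and the square root of each diagonal entry is reported as that
    # parameter's uncertainty.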
    m_hessian = np.matrix(hessian)
    covariance = np.linalg.inv(m_hessian)
    cv_array = np.array(covariance)
    parameters = ['Traps', 'EH_Decay', 'E_Trap', 'TH_loss', 'G_Decay',
                  'G2_Decay', 'G3_Decay', 'GH_Decay', 'G_Escape', 'G3_Loss']
    for i in range(len(parameters)):
        print('{}{}: {} +- {}'.format(' ' * (8 - len(parameters[i])),
                                      parameters[i], p0[i],
                                      np.sqrt(cv_array[i][i])))
    with open('Parameters_{}.txt'.format(scale), 'w') as f:
        writer = csv.writer(f, dialect="excel-tab")
        for i in range(len(parameters)):
            error = np.sqrt(cv_array[i][i])
            relerror = error / pi[i] * 100
            words = '{}{}: {} +- {} ({}%)'.format(' ' * (8 - len(parameters[i])),
                                                  parameters[i], pi[i], error,
                                                  relerror)
            print(words)
            writer.writerow([words])


if __name__ == '__main__':
    main()