diff --git a/fastpt/FASTPT.py b/fastpt/FASTPT.py index e9401bf..ae92313 100644 --- a/fastpt/FASTPT.py +++ b/fastpt/FASTPT.py @@ -44,11 +44,14 @@ from scipy.signal import fftconvolve import scipy.integrate as integrate from .fastpt_extr import p_window, c_window, pad_left, pad_right -from .matter_power_spt import P_13_reg, Y1_reg_NL, Y2_reg_NL +from .matter_power_spt import P_13_reg, Y1_reg_NL, Y2_reg_NL, P_22 from .initialize_params import scalar_stuff, tensor_stuff from .IA_tt import IA_tt from .IA_ABD import IA_A, IA_DEE, IA_DBB, P_IA_B from .IA_ta import IA_deltaE1, P_IA_deltaE2, IA_0E0E, IA_0B0B +from .IA_ct import IA_tij_feG2, IA_tij_heG2, IA_tij_F2F2, IA_tij_G2G2, IA_tij_F2G2, P_IA_13G, P_IA_13F, P_22F_reg, P_22G_reg, IA_tij_F2G2reg +from .IA_ctbias import IA_gb2_F2, IA_gb2_G2, IA_gb2_S2F2, IA_gb2_S2G2 +from .J_k import J_k from .OV import OV from .kPol import kPol from .RSD import RSDA, RSDB @@ -172,6 +175,7 @@ def __init__(self, k, nu=None, to_do=None, param_mat=None, low_extrap=None, high self.OV_do = False self.kPol_do = False self.RSD_do = False + self.IA_tij_do = False for entry in to_do: # convert to_do list to instructions for FAST-PT initialization if entry == 'one_loop_dd': @@ -190,6 +194,7 @@ def __init__(self, k, nu=None, to_do=None, param_mat=None, low_extrap=None, high self.IA_tt_do = True self.IA_ta_do = True self.IA_mix_do = True + self.IA_tij_do = True continue elif entry == 'IA_tt': self.IA_tt_do = True @@ -212,6 +217,13 @@ def __init__(self, k, nu=None, to_do=None, param_mat=None, low_extrap=None, high elif entry == 'IRres': self.dd_do = True continue + elif entry == 'tij': + self.IA_dd_do = True + self.IA_ta_do = True + self.IA_tt_do = True + self.IA_mix_do = True + self.IA_tij_do = True + continue elif entry == 'all' or entry == 'everything': self.dd_do = True self.dd_bias_do = True @@ -222,6 +234,7 @@ def __init__(self, k, nu=None, to_do=None, param_mat=None, low_extrap=None, high self.kPol_do = True self.RSD_do = True self.cleft = True 
+ self.IA_tij_do = True continue else: raise ValueError('FAST-PT does not recognize "' + entry + '" in the to_do list.') @@ -238,6 +251,7 @@ def __init__(self, k, nu=None, to_do=None, param_mat=None, low_extrap=None, high self.X_spt = scalar_stuff(p_mat, nu, self.N, self.m, self.eta_m, self.l, self.tau_l) self.X_lpt = scalar_stuff(p_mat_lpt, nu, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_sptG = scalar_stuff(p_mat, nu, self.N, self.m, self.eta_m, self.l, self.tau_l) if self.cleft: nu = -2 @@ -275,6 +289,39 @@ def __init__(self, k, nu=None, to_do=None, param_mat=None, low_extrap=None, high self.X_IA_deltaE1 = tensor_stuff(p_mat_deltaE1, self.N, self.m, self.eta_m, self.l, self.tau_l) self.X_IA_0E0E = tensor_stuff(p_mat_0E0E, self.N, self.m, self.eta_m, self.l, self.tau_l) self.X_IA_0B0B = tensor_stuff(p_mat_0B0B, self.N, self.m, self.eta_m, self.l, self.tau_l) + + if self.IA_tij_do: + IA_tij_feG2_tab = IA_tij_feG2() + IA_tij_heG2_tab = IA_tij_heG2() + IA_tij_F2F2_tab = IA_tij_F2F2() + IA_tij_G2G2_tab = IA_tij_G2G2() + IA_tij_F2G2_tab = IA_tij_F2G2() + IA_tij_F2G2reg_tab =IA_tij_F2G2reg() + IA_gb2_F2_tab = IA_gb2_F2() + IA_gb2_G2_tab = IA_gb2_G2() + IA_gb2_S2F2_tab = IA_gb2_S2F2() + IA_gb2_S2G2_tab = IA_gb2_S2G2() + p_mat_tij_feG2 = IA_tij_feG2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_tij_heG2 = IA_tij_heG2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_tij_F2F2 = IA_tij_F2F2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_tij_G2G2 = IA_tij_G2G2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_tij_F2G2 = IA_tij_F2G2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_tij_F2G2reg_tab = IA_tij_F2G2reg_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_gb2_F2 = IA_gb2_F2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_gb2_G2 = IA_gb2_G2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_gb2_S2F2 = IA_gb2_S2F2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + p_mat_gb2_S2G2 = IA_gb2_S2G2_tab[:, [0, 1, 5, 6, 7, 8, 9]] + self.X_IA_tij_feG2 = tensor_stuff(p_mat_tij_feG2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_tij_heG2 = 
tensor_stuff(p_mat_tij_heG2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_tij_F2F2 = tensor_stuff(p_mat_tij_F2F2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_tij_G2G2 = tensor_stuff(p_mat_tij_G2G2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_tij_F2G2 = tensor_stuff(p_mat_tij_F2G2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_tij_F2G2reg = tensor_stuff(p_mat_tij_F2G2reg_tab, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_gb2_F2 = tensor_stuff(p_mat_gb2_F2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_gb2_G2 = tensor_stuff(p_mat_gb2_G2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_gb2_S2F2 = tensor_stuff(p_mat_gb2_S2F2, self.N, self.m, self.eta_m, self.l, self.tau_l) + self.X_IA_gb2_S2G2 = tensor_stuff(p_mat_gb2_S2G2, self.N, self.m, self.eta_m, self.l, self.tau_l) + if self.OV_do: # For OV, we can use two different values for @@ -324,6 +371,8 @@ def one_loop_dd(self, P, P_window=None, C_window=None): P13 = P_13_reg(self.k_old, Ps) P_1loop = P22 + P13 + + if (self.dd_bias_do): # if dd_bias is in to_do, this function acts like one_loop_dd_bias @@ -362,6 +411,10 @@ def one_loop_dd(self, P, P_window=None, C_window=None): return P_1loop, Ps + + + + def one_loop_dd_bias(self, P, P_window=None, C_window=None): nu = -2 @@ -592,6 +645,72 @@ def IA_ta(self, P, P_window=None, C_window=None): def IA_der(self, P, P_window=None, C_window=None): P_der = (self.k_original**2)*P return P_der + + def IA_ct(self,P,P_window=None, C_window=None): + P_feG2, A = self.J_k_tensor(P,self.X_IA_tij_feG2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_feG2 = self.EK.PK_original(P_feG2) + P_heG2, A = self.J_k_tensor(P,self.X_IA_tij_heG2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_heG2 = self.EK.PK_original(P_heG2) + P_F2F2, A = self.J_k_tensor(P,self.X_IA_tij_F2F2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_F2F2 = 
self.EK.PK_original(P_F2F2) + P_G2G2, A = self.J_k_tensor(P,self.X_IA_tij_G2G2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_G2G2 = self.EK.PK_original(P_G2G2) + P_F2G2, A = self.J_k_tensor(P,self.X_IA_tij_F2G2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_F2G2 = self.EK.PK_original(P_F2G2) + P_A00E,A,B,C = self.IA_ta(P, P_window=P_window, C_window=C_window) + P_A0E2,D,E,F = self.IA_mix(P,P_window=P_window, C_window=C_window) + P_13F = P_IA_13F(self.k_original, P) + P_13G = P_IA_13G(self.k_original,P,) + nu=-2 + Ps, mat = self.J_k_scalar(P, self.X_spt, nu, P_window=P_window, C_window=C_window) + one_loop_coef = np.array( + [2 * 1219 / 1470., 2 * 671 / 1029., 2 * 32 / 1715., 2 * 1 / 3., 2 * 62 / 35., 2 * 8 / 35., 1 / 3.]) + P22_mat = np.multiply(one_loop_coef, np.transpose(mat)) + P_22F = np.sum(P22_mat, 1) + + one_loop_coefG= np.array( + [2*1003/1470, 2*803/1029, 2*64/1715, 2*1/3, 2*58/35, 2*12/35, 1/3]) + PsG, matG = self.J_k_scalar(P, self.X_sptG, nu, P_window=P_window, C_window=C_window) + P22G_mat = np.multiply(one_loop_coefG, np.transpose(matG)) + P_22G = np.sum(P22G_mat, 1) + if (self.extrap): + _, P_22F=self.EK.PK_original(P_22F) + _, P_22G=self.EK.PK_original(P_22G) + P_tEtE = P_F2F2+P_G2G2-2*P_F2G2 + P_0tE = P_22G-P_22F+P_13G-P_13F + P_0EtE = np.subtract(P_feG2,(1/2)*P_A00E) + P_E2tE = np.subtract(P_heG2,(1/2)*P_A0E2) + + return 2*P_0tE,2*P_0EtE,2*P_E2tE,2*P_tEtE + + + def IA_ctbias(self,P,P_window=None, C_window=None): + P_F2, A = self.J_k_tensor(P,self.X_IA_gb2_F2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_F2 = self.EK.PK_original(P_F2) + P_G2, A = self.J_k_tensor(P,self.X_IA_gb2_G2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_G2 = self.EK.PK_original(P_G2) + P_d2tE = P_G2-P_F2 + P_S2F2, A = self.J_k_tensor(P, self.X_IA_gb2_S2F2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_S2F2 = self.EK.PK_original(P_S2F2) + + #P_13S2F2 = P_IA_13S2F2(self.k_original, 
P) + + P_S2G2, A = self.J_k_tensor(P, self.X_IA_gb2_S2G2, P_window=P_window, C_window=C_window) + if (self.extrap): + _, P_S2G2 = self.EK.PK_original(P_S2G2) + P_s2tE=P_S2G2-P_S2F2 + + return 2*P_d2tE,2*P_s2tE + + def OV(self, P, P_window=None, C_window=None): P, A = self.J_k_tensor(P, self.X_OV, P_window=P_window, C_window=C_window) diff --git a/fastpt/FASTPT_simple.py b/fastpt/FASTPT_simple.py index 651f254..c120c6e 100644 --- a/fastpt/FASTPT_simple.py +++ b/fastpt/FASTPT_simple.py @@ -257,13 +257,13 @@ def P22(self,P,P_window=None,C_window=None): def one_loop(self,P,P_window=None,C_window=None): - Ps,P22=self.P22(P,P_window,C_window) - P13=P_13_reg(self.k_old,Ps) - if (self.extrap): - _,P=self.EK.PK_original(P22+P13) - return P + Ps,P22=self.P22(P,P_window,C_window) + P13=P_13_reg(self.k_old,Ps) + if (self.extrap): + _,P=self.EK.PK_original(P22+P13) + return P - return P22+P13 + return P22+P13 def P_bias(self,P,P_window=None,C_window=None): # Quadraric bias Legendre components diff --git a/fastpt/IA_ct.py b/fastpt/IA_ct.py new file mode 100644 index 0000000..1de9e45 --- /dev/null +++ b/fastpt/IA_ct.py @@ -0,0 +1,213 @@ +from __future__ import division +import numpy as np +from .J_table import J_table +from .J_k import J_k +import sys +from time import time +from numpy import log, sqrt, exp, pi +from scipy.signal import fftconvolve as convolve + +def IA_tij_feG2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_tij_feG2=np.array([[0,0,0,2,0,13/21],\ + [0,0,0,2,2,8/21],\ + [1,-1,0,2,1,1/2],\ + [-1,1,0,2,1,1/2]], dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_tij_feG2.shape[0]): + x=J_table(l_mat_tij_feG2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_tij_heG2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_tij_heG2=np.array([[0,0,0,0,0,-9/70],\ + [0,0,2,0,0,-26/63],\ + [0,0,0,0,2,-15/49],\ + [0,0,2,0,2,-16/63],\ + [0,0,1,1,1,81/70],\ + [0,0,1,1,3,12/35],\ + [0,0,0,0,4,-16/245],\ + 
[1,-1,0,0,1,-3/10],\ + [1,-1,2,0,1,-1/3],\ + [1,-1,1,1,0,1/2],\ + [1,-1,1,1,2,1],\ + [1,-1,0,2,1,-1/3],\ + [1,-1,0,0,3,-1/5]], dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_tij_heG2.shape[0]): + x=J_table(l_mat_tij_heG2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_tij_F2F2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_tij_F2F2=np.array([[0,0,0,0,0,1219/1470],\ + [0,0,0,0,2,671/1029],\ + [0,0,0,0,4,32/1715],\ + [2,-2,0,0,0,1/6],\ + [2,-2,0,0,2,1/3],\ + [1,-1,0,0,1,62/35],\ + [1,-1,0,0,4,8/35]], dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_tij_F2F2.shape[0]): + x=J_table(l_mat_tij_F2F2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_tij_G2G2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_tij_G2G2=np.array([[0,0,0,0,0,851/1470],\ + [0,0,0,0,2,871/1029],\ + [0,0,0,0,4,128/1715],\ + [2,-2,0,0,0,1/6],\ + [2,-2,0,0,2,1/3],\ + [1,-1,0,0,1,54/35],\ + [1,-1,0,0,4,16/35]], dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_tij_G2G2.shape[0]): + x=J_table(l_mat_tij_G2G2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_tij_F2G2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_tij_F2G2=np.array([[0,0,0,0,0,1003/1470],\ + [0,0,0,0,2,803/1029],\ + [0,0,0,0,4,64/1715],\ + [2,-2,0,0,0,1/6],\ + [2,-2,0,0,2,1/3],\ + [1,-1,0,0,1,58/35],\ + [1,-1,0,0,4,12/35]], dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_tij_F2G2.shape[0]): + x=J_table(l_mat_tij_F2G2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def P_IA_13G(k,P): + N=k.size + n = np.arange(-N+1,N ) + dL=log(k[1])-log(k[0]) + s=n*dL + cut=7 + high_s=s[s > cut] + low_s=s[s < -cut] + mid_high_s=s[ (s <= cut) & (s > 0)] + mid_low_s=s[ (s >= -cut) & (s < 0)] + + # For Zbar + Z1=lambda r : (12/r**2-26+4*r**2-6*r**4+(3/r**3)*(r**2-1)**3*(r**2+2)*log((r+1.)/np.absolute(r-1.))) *r + Z1_low=lambda r : 
(-224/5+1248/(35*r**2)-608/(105*r**4)-26/(21*r**6)-4/(35*r**8)+34/(21*r**10) -4/(3*r**12)) *r + Z1_high=lambda r: ((-32*r**2)/5-(96*r**4)/7+(352*r**6)/105+(164*r**8)/105-(58*r**10)/35+(4*r**12)/21+(2*r**14)/3) *r + + f_mid_low=Z1(exp(-mid_low_s)) + f_mid_high=Z1(exp(-mid_high_s)) + f_high = Z1_high(exp(-high_s)) + f_low = Z1_low(exp(-low_s)) + + f=np.hstack((f_low,f_mid_low,-16.,f_mid_high,f_high)) + # print(f) + + g= convolve(P, f) * dL + g_k=g[N-1:2*N-1] + deltaE2= 1/84 * k**3/(2*pi)**2 * P*g_k + return deltaE2 + +def P_22F_reg(k,P, P_window, C_window, n_pad): + # P_22 Legendre components + # We calculate a regularized version of P_22 + # by omitting the J_{2,-2,0} term so that the + # integral converges. In the final power spectrum + # we add the asymptotic portions of P_22 and P_13 so + # that we get a convergent integral. See section of XXX. + + param_matrix=np.array([[0,0,0,0],[0,0,2,0],[0,0,4,0],[2,-2,2,0],\ + [1,-1,1,0],[1,-1,3,0],[2,-2,0,1] ]) + + + Power, mat=J_k(k,P,param_matrix, P_window=P_window, C_window=C_window, n_pad=n_pad) + A=1219/1470.*mat[0,:] + B=671/1029.*mat[1,:] + C=32/1715.*mat[2,:] + D=1/3.*mat[3,:] + E=62/35.*mat[4,:] + F=8/35.*mat[5,:] + reg=1/3.*mat[6,:] + + return 2*(A+B+C+D+E+F)+ reg + +def P_22G_reg(k,P, P_window, C_window, n_pad): + # P_22 Legendre components + # We calculate a regularized version of P_22 + # by omitting the J_{2,-2,0} term so that the + # integral converges. In the final power spectrum + # we add the asymptotic portions of P_22 and P_13 so + # that we get a convergent integral. See section of XXX. 
+ + param_matrix=np.array([[0,0,0,0],[0,0,2,0],[0,0,4,0],[2,-2,2,0],\ + [1,-1,1,0],[1,-1,3,0],[2,-2,0,1]]) + + + Power, mat=J_k(k,P,param_matrix, P_window=P_window, C_window=C_window, n_pad=n_pad) + A=1003/1470.*mat[0,:] + B=803/1029.*mat[1,:] + C=64/1715.*mat[2,:] + D=1/3.*mat[3,:] + E=58/35.*mat[4,:] + F=12/35.*mat[5,:] + reg=1/3.*mat[6,:] + + + return 2*(A+B+C+D+E+F)+ reg + +def IA_tij_F2G2reg(): + # P_22 Legendre components + # We calculate a regularized version of P_22 + # by omitting the J_{2,-2,0} term so that the + # integral converges. In the final power spectrum + # we add the asymptotic portions of P_22 and P_13 so + # that we get a convergent integral. See section of XXX. + + l_mat_tij_F2G2=np.array([[0,0,0,0,0,1003/1470],\ + [0,0,0,0,2,803/1029],\ + [0,0,0,0,4,64/1715],\ + [2,-2,0,0,2,1/3],\ + [1,-1,0,0,1,58/35],\ + [1,-1,0,0,4,12/35]], dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_tij_F2G2.shape[0]): + x=J_table(l_mat_tij_F2G2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + + +def P_IA_13F(k,P): + N=k.size + n= np.arange(-N+1,N ) + dL=log(k[1])-log(k[0]) + s=n*dL + + cut=7 + high_s=s[s > cut] + low_s=s[s < -cut] + mid_high_s=s[ (s <= cut) & (s > 0)] + mid_low_s=s[ (s >= -cut) & (s < 0)] + + Z=lambda r : (12./r**2 +10. 
+ 100.*r**2-42.*r**4 \ + 3./r**3*(r**2-1.)**3*(7*r**2+2.)*log((r+1.)/np.absolute(r-1.)) ) *r + Z_low=lambda r : (352./5.+96./5./r**2 -160./21./r**4 - 526./105./r**6 +236./35./r**8-50./21./r**10-4./3./r**12) *r + Z_high=lambda r: (928./5.*r**2 - 4512./35.*r**4 +416./21.*r**6 +356./105.*r**8+74./35.*r**10-20./3.*r**12+14./3.*r**14) *r + + f_mid_low=Z(exp(-mid_low_s)) + f_mid_high=Z(exp(-mid_high_s)) + f_high = Z_high(exp(-high_s)) + f_low = Z_low(exp(-low_s)) + + f=np.hstack((f_low,f_mid_low,80,f_mid_high,f_high)) + + g= convolve(P, f) * dL + g_k=g[N-1:2*N-1] + P_bar= 1./252.* k**3/(2*pi)**2*P*g_k + + return P_bar diff --git a/fastpt/IA_ctbias.py b/fastpt/IA_ctbias.py new file mode 100644 index 0000000..7a189cd --- /dev/null +++ b/fastpt/IA_ctbias.py @@ -0,0 +1,62 @@ +from __future__ import division +import numpy as np +from .J_table import J_table +import sys +from time import time +from numpy import log, sqrt, exp, pi +from scipy.signal import fftconvolve as convolve + +def IA_gb2_F2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_gb2_F2=np.array([[0,0,0,0,0,17/21],\ + [0,0,0,0,2,4/21],\ + [1,-1,0,0,1,1/2],\ + [-1,1,0,0,1,1/2]],dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_gb2_F2.shape[0]): + x=J_table(l_mat_gb2_F2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_gb2_G2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_gb2_G2=np.array([[0,0,0,0,0,13/21],\ + [0,0,0,0,2,8/21],\ + [1,-1,0,0,1,1/2],\ + [-1,1,0,0,1,1/2]],dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_gb2_G2.shape[0]): + x=J_table(l_mat_gb2_G2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_gb2_S2F2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_gb2_S2F2=np.array([[0,0,0,0,0,8/315],\ + [0,0,0,0,2,254/441],\ + [0,0,0,0,4,16/245],\ + [1,-1,0,0,1,2/15],\ + [1,-1,0,0,3,1/5],\ + [-1,1,0,0,1,2/15],\ + [-1,1,0,0,3,1/5]],dtype=float) + 
table=np.zeros(10,dtype=float) + for i in range(l_mat_gb2_S2F2.shape[0]): + x=J_table(l_mat_gb2_S2F2[i]) + table=np.row_stack((table,x)) + return table[1:,:] + +def IA_gb2_S2G2(): + # Ordering is \alpha, \beta, l_1, l_2, l, A coeficient + l_mat_gb2_S2G2=np.array([[0,0,0,0,0,16/315],\ + [0,0,0,0,2,214/441],\ + [0,0,0,0,4,32/245],\ + [1,-1,0,0,1,2/15],\ + [1,-1,0,0,3,1/5],\ + [-1,1,0,0,1,2/15],\ + [-1,1,0,0,3,1/5]],dtype=float) + table=np.zeros(10,dtype=float) + for i in range(l_mat_gb2_S2G2.shape[0]): + x=J_table(l_mat_gb2_S2G2[i]) + table=np.row_stack((table,x)) + return table[1:,:] \ No newline at end of file diff --git a/fastpt/fastpttest.py b/fastpt/fastpttest.py deleted file mode 100644 index b137b31..0000000 --- a/fastpt/fastpttest.py +++ /dev/null @@ -1,87 +0,0 @@ -import fastpt as pt -from time import time -import matplotlib.pyplot as plt -import numpy as np - - # Version check - - # load the data file - -d = np.loadtxt('/home/carterw/FAST-PT/examples/Pk_test.dat') - # declare k and the power spectrum -k = d[:, 0]; -P = d[:, 1] - - # set the parameters for the power spectrum window and - # Fourier coefficient window - # P_window=np.array([.2,.2]) -C_window = .75 - # document this better in the user manual - - # padding length -n_pad = int(0.5 * len(k)) - # to_do=['one_loop_dd','IA_tt'] -to_do = ['one_loop_dd'] - # to_do=['dd_bias','IA_all'] - # to_do=['all'] - - # initialize the FASTPT class - # including extrapolation to higher and lower k -t1 = time() -fpt = pt(k, to_do=to_do, low_extrap=-5, high_extrap=3, n_pad=n_pad) - -t2 = time() - # calculate 1loop SPT (and time the operation) - # P_spt=fastpt.one_loop_dd(P,C_window=C_window) -P_lpt = fpt.one_loop_dd_bias_lpt(P, C_window=C_window) -P_der = fpt.IA_der(P,C_window=C_window) - - # for M = 10**14 M_sun/h -b1L = 1.02817 -b2L = -0.0426292 -b3L = -2.55751 -b1E = 1 + b1L - - # for M = 10**14 M_sun/h - # b1_lag = 1.1631 - # b2_lag = 0.1162 - - # [Ps, Pnb, Pb1L, Pb1L_2, Pb1L_b2L, Pb2L, Pb2L_2, Pb3L, Pb1L_b3L] = 
[P_lpt[0],P_lpt[1],P_lpt[2],P_lpt[3],P_lpt[4],P_lpt[5],P_lpt[6],P_lpt[7],P_lpt[8]] -[Ps, Pnb, Pb1L, Pb1L_2, Pb1L_b2L, Pb2L, Pb2L_2] = [P_lpt[0], P_lpt[1], P_lpt[2], P_lpt[3], P_lpt[4], P_lpt[5], - P_lpt[6]] - - # Pgg_lpt = (b1E**2)*P + Pnb + (b1L)*(Pb1L) + (b1L**2)*Pb1L_2 + (b1L*b2L)*Pb1L_b2L + (b2L)*(Pb2L) + (b2L**2)*Pb2L_2 + (b3L)*(Pb3L) + (b1L*b3L)*Pb1L_b3L -Pgg_lpt = (b1E ** 2) * P + Pnb + (b1L) * (Pb1L) + (b1L ** 2) * Pb1L_2 + (b1L * b2L) * Pb1L_b2L + (b2L) * (Pb2L) + ( - b2L ** 2) * Pb2L_2 - - # print([pnb,pb1L,pb1L_2,pb2L,Pb1L_b2L]) - -t3 = time() -print('initialization time for', to_do, "%10.3f" % (t2 - t1), 's') -print('one_loop_dd recurring time', "%10.3f" % (t3 - t2), 's') - - # calculate tidal torque EE and BB P(k) - # P_IA_tt=fastpt.IA_tt(P,C_window=C_window) - # P_IA_ta=fastpt.IA_ta(P,C_window=C_window) - # P_IA_mix=fastpt.IA_mix(P,C_window=C_window) - # P_RSD=fastpt.RSD_components(P,1.0,C_window=C_window) - # P_kPol=fastpt.kPol(P,C_window=C_window) - # P_OV=fastpt.OV(P,C_window=C_window) - # P_IRres=fastpt.IRres(P,C_window=C_window) - # make a plot of 1loop SPT results - - -ax = plt.subplot(111) -ax.set_xscale('log') -ax.set_yscale('log') -ax.set_ylabel(r'$P(k)$', size=30) -ax.set_xlabel(r'$k$', size=30) - -ax.plot(k, P, label='linear') -# ax.plot(k,P_spt[0], label=r'$P_{22}(k) + P_{13}(k)$' ) -ax.plot(k, Pgg_lpt, label='P_lpt') -ax.plot(k,P_der, label='P_der') - -plt.legend(loc=3) -plt.grid() -plt.show() \ No newline at end of file