Skip to content

Commit

Permalink
Merge pull request #53 from neutrons/dq-update
Browse files Browse the repository at this point in the history
Update dq for q summing
  • Loading branch information
mdoucet authored Dec 12, 2024
2 parents 730dba8 + dbb1aea commit 1cff7ed
Show file tree
Hide file tree
Showing 3 changed files with 71 additions and 16 deletions.
53 changes: 41 additions & 12 deletions reduction/lr_reduction/event_reduction.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,8 @@ def __init__(self, scattering_workspace, direct_workspace,
self._offspec_x_bins = None
self._offspec_z_bins = None
self.summing_threshold = None
self.q_summing = False
self.dq_over_q = 0
self.dead_time = dead_time
self.paralyzable = paralyzable
self.dead_time_value = dead_time_value
Expand Down Expand Up @@ -287,10 +289,15 @@ def extract_meta_data(self):
else:
self.wl_range = [self.tof_range[0] / self.constant, self.tof_range[1] / self.constant]

# q_min and q_max are the boundaries for the final Q binning
# We also hold on to the true Q range covered by the measurement
self.q_min_meas = 4.0*np.pi/self.wl_range[1] * np.fabs(np.sin(self.theta))
self.q_max_meas = 4.0*np.pi/self.wl_range[0] * np.fabs(np.sin(self.theta))

if self.q_min is None:
self.q_min = 4.0*np.pi/self.wl_range[1] * np.fabs(np.sin(self.theta))
self.q_min = self.q_min_meas
if self.q_max is None:
self.q_max = 4.0*np.pi/self.wl_range[0] * np.fabs(np.sin(self.theta))
self.q_max = self.q_max_meas

# Q binning to use
self.q_bins = get_q_binning(self.q_min, self.q_max, self.q_step)
Expand Down Expand Up @@ -351,7 +358,7 @@ def __repr__(self):
output += " source-det: %s\n" % self.source_detector_distance
output += " pixel: %s\n" % self.pixel_width
output += " WL: %s %s\n" % (self.wl_range[0], self.wl_range[1])
output += " Q: %s %s\n" % (self.q_min, self.q_max)
output += " Q: %s %s\n" % (self.q_min_meas, self.q_max_meas)
theta_degrees = self.theta * 180 / np.pi
output += " Theta = %s\n" % theta_degrees
output += " Emission delay = %s" % self.use_emission_time
Expand All @@ -377,13 +384,12 @@ def to_dict(self):
norm_run = 0

dq0 = 0
dq_over_q = compute_resolution(self._ws_sc, theta=self.theta)
return dict(wl_min=self.wl_range[0], wl_max=self.wl_range[1],
q_min=self.q_min, q_max=self.q_max, theta=self.theta,
q_min=self.q_min_meas, q_max=self.q_max_meas, theta=self.theta,
start_time=start_time, experiment=experiment, run_number=run_number,
run_title=run_title, norm_run=norm_run, time=time.ctime(),
dq0=dq0, dq_over_q=dq_over_q, sequence_number=sequence_number,
sequence_id=sequence_id)
dq0=dq0, dq_over_q=self.dq_over_q, sequence_number=sequence_number,
sequence_id=sequence_id, q_summing=self.q_summing)

def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False,
clean=False, normalize=True):
Expand Down Expand Up @@ -420,6 +426,10 @@ def specular(self, q_summing=False, tof_weighted=False, bck_in_q=False,
self.d_refl = self.d_refl[idx[:-1]]
self.q_bins = self.q_bins[idx]

# Compute Q resolution
self.dq_over_q = compute_resolution(self._ws_sc, theta=self.theta, q_summing=q_summing)
self.q_summing = q_summing

return self.q_bins, self.refl, self.d_refl

def specular_unweighted(self, q_summing=False, normalize=True):
Expand Down Expand Up @@ -873,11 +883,32 @@ def gravity_correction(self, ws, wl_list):
return (theta_sample-theta_in) * np.pi / 180.0


def compute_resolution(ws, default_dq=0.027, theta=None):
def compute_resolution(ws, default_dq=0.027, theta=None, q_summing=False):
"""
Compute the Q resolution from the meta data.
:param theta: scattering angle in radians
:param q_summing: if True, the pixel size will be used for the resolution
"""
settings = read_settings(ws)

if theta is None:
theta = abs(ws.getRun().getProperty("ths").value[0]) * np.pi / 180.

if q_summing:
# Q resolution is reported as FWHM, so here we consider this to be
# related to the pixel width
sdd = 1830
if "sample-det-distance" in settings:
sdd = settings["sample-det-distance"] * 1000
pixel_size = 0.7
if "pixel-width" in settings:
pixel_size = settings["pixel-width"]

# All angles here in radians, assuming small angles
dq_over_q = np.arcsin(pixel_size / sdd) / theta
print("Q summing: %g" % dq_over_q)
return dq_over_q

# We can't compute the resolution if the value of xi is not in the logs.
# Since it was not always logged, check for it here.
if not ws.getRun().hasProperty("BL4B:Mot:xi.RBV"):
Expand All @@ -894,12 +925,10 @@ def compute_resolution(ws, default_dq=0.027, theta=None):

# Distance between the s1 and the sample
s1_sample_distance = 1485
if ws.getInstrument().hasParameter("s1-sample-distance"):
s1_sample_distance = ws.getInstrument().getNumberParameter("s1-sample-distance")[0]*1000
if "s1-sample-distance" in settings:
s1_sample_distance = settings["s1-sample-distance"] * 1000

s1h = abs(ws.getRun().getProperty("S1VHeight").value[0])
if theta is None:
theta = abs(ws.getRun().getProperty("ths").value[0]) * np.pi / 180.
xi = abs(ws.getRun().getProperty("BL4B:Mot:xi.RBV").value[0])
sample_si_distance = xi_reference - xi
slit_distance = s1_sample_distance - sample_si_distance
Expand Down
2 changes: 0 additions & 2 deletions reduction/lr_reduction/output.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,8 +137,6 @@ def save_ascii(self, file_path, meta_as_json=False):
initial_entry_written = True

# Write R(q)
fd.write('# dQ0[1/Angstrom] = %g\n' % _meta['dq0'])
fd.write('# dQ/Q = %g\n' % _meta['dq_over_q'])
fd.write('# %-21s %-21s %-21s %-21s\n' % ('Q [1/Angstrom]', 'R', 'dR', 'dQ [FWHM]'))
for i in range(len(self.qz_all)):
fd.write('%20.16f %20.16f %20.16f %20.16f\n' % (self.qz_all[i], self.refl_all[i], self.d_refl_all[i], self.d_qz_all[i]))
Expand Down
32 changes: 30 additions & 2 deletions reduction/test/test_reduction.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# standard imports
from pathlib import Path
import os
from pathlib import Path

# third-party imports
import mantid
Expand All @@ -11,7 +11,6 @@
from lr_reduction import event_reduction, template, workflow
from lr_reduction.utils import amend_config


mtd_api.config["default.facility"] = "SNS"
mtd_api.config["default.instrument"] = "REF_L"
mantid.kernel.config.setLogLevel(3)
Expand Down Expand Up @@ -47,6 +46,35 @@ def test_attenuation(nexus_dir):
event_reduction.process_attenuation(ws_sc, 0.005)


def test_q_summing(nexus_dir):
    """
    Check the Q-summing reduction path against the standard reduction
    for run 198415, including the reported Q resolution in the metadata.
    """
    template_file = 'data/template.xml'
    template.read_template(template_file, 7)
    with amend_config(data_dir=nexus_dir):
        ws_sc = mtd_api.Load("REF_L_%s" % 198415)

    # Baseline: standard reduction (no Q summing)
    qz_ref, refl_ref, _, meta_data = template.process_from_template_ws(ws_sc, template_file, info=True)
    assert np.fabs(meta_data['dq_over_q'] - 0.02759) < 1e-3

    # Q-summing reduction should report a finer resolution but yield
    # comparable reflectivity values
    qz_sum, refl_sum, _, meta_data = template.process_from_template_ws(
        ws_sc, template_file, tof_weighted=True, info=True, q_summing=True
    )
    assert np.fabs(meta_data['dq_over_q'] - 0.009354) < 1e-5

    # TOF-weighted processing may cover a slightly different range, so the
    # extra leading point of the Q-summed result is dropped before comparing.
    assert len(qz_ref) == len(qz_sum[1:])
    assert np.fabs(np.mean(refl_sum[1:] - refl_ref)) < 1e-6

    # Remove the partial output files written during processing
    output_dir = 'data/'
    cleanup_partial_files(output_dir, range(198409, 198417))


def test_full_reduction(nexus_dir):
"""
Test the full reduction chain
Expand Down

0 comments on commit 1cff7ed

Please sign in to comment.