From 795ef56e3b50de77da1da48b6363f96ae5506dd7 Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Tue, 14 Jan 2025 20:30:23 -0500 Subject: [PATCH 1/5] checkpoint and playground --- reduction/lr_reduction/tof_reduction.py | 142 ++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 reduction/lr_reduction/tof_reduction.py diff --git a/reduction/lr_reduction/tof_reduction.py b/reduction/lr_reduction/tof_reduction.py new file mode 100644 index 00000000..8e608077 --- /dev/null +++ b/reduction/lr_reduction/tof_reduction.py @@ -0,0 +1,142 @@ +import numpy as np + +from lr_reduction.event_reduction import EventReflectivity + + +class TOFReduction(EventReflectivity): + + def __init__(self, **kwargs): + super(TOFReduction, self).__init__(**kwargs) + + def _reflectivity( + self, ws, peak_position, peak, low_res, theta, wl_bins=None, sum_pixels=True, **kwargs + ): + """ + Unused parameters: + - q_bins + + + """ + print(peak_position, peak, low_res, theta, wl_bins, sum_pixels, kwargs) + charge = ws.getRun().getProtonCharge() + self.n_tof_bins = 100 + + shape = len(wl_bins) - 1 if sum_pixels else ((peak[1] - peak[0] + 1), len(wl_bins) - 1) + tof_bins = np.linspace(self.tof_range[0], self.tof_range[1], self.n_tof_bins + 1) + + refl = np.zeros(shape) + d_refl_sq = np.zeros(shape) + #counts = np.zeros(shape) + + for i in range(low_res[0], int(low_res[1] + 1)): + for j in range(peak[0], int(peak[1] + 1)): + if self.instrument == self.INSTRUMENT_4A: + pixel = j * self.n_y + i + else: + pixel = i * self.n_y + j + evt_list = ws.getSpectrum(pixel) + if evt_list.getNumberEvents() == 0: + continue + + tofs = evt_list.getTofs() + if self.use_emission_time: + tofs = self.emission_time_correction(ws, tofs) + + event_weights = evt_list.getWeights() + + _counts, _ = np.histogram(tofs, bins=tof_bins, weights=event_weights) + if sum_pixels: + refl += _counts + else: + refl[j - peak[0]] += _counts + + d_refl_sq = np.sqrt(np.fabs(refl)) / charge + refl /= charge + + return 
refl, d_refl_sq + + def specular(self, q_summing=False, normalize=True): + """ + Compute specular reflectivity. + + For constant-Q binning, it's preferred to use tof_weighted=True. + + Parameters + ---------- + q_summing : bool + Turns on constant-Q binning + normalize : bool + If True, and tof_weighted is False, normalization will be skipped + + Returns + ------- + q_bins + The Q bin boundaries + refl + The reflectivity values + d_refl + The uncertainties in the reflectivity values + """ + # Scattering data + refl, d_refl = self._reflectivity( + self._ws_sc, + peak_position=self.specular_pixel, + peak=self.signal_peak, + low_res=self.signal_low_res, + theta=self.theta, + ) + # Remove background + if False and self.signal_bck is not None: + refl_bck, d_refl_bck = self.bck_subtraction() + refl -= refl_bck + d_refl = np.sqrt(d_refl**2 + d_refl_bck**2) + + if normalize: + # Normalize by monitor + self._normalize(refl, d_refl) + norm, d_norm = self._reflectivity( + self._ws_db, peak_position=0, peak=self.norm_peak, low_res=self.norm_low_res, theta=self.theta, q_summing=False + ) + + # Direct beam background could be added here. The effect will be negligible. 
+ if self.norm_bck is not None: + norm_bck, d_norm_bck = self.norm_bck_subtraction() + norm -= norm_bck + d_norm = np.sqrt(d_norm**2 + d_norm_bck**2) + db_bins = norm > 0 + + refl[db_bins] = refl[db_bins] / norm[db_bins] + d_refl[db_bins] = np.sqrt( + d_refl[db_bins] ** 2 / norm[db_bins] ** 2 + refl[db_bins] ** 2 * d_norm[db_bins] ** 2 / norm[db_bins] ** 4 + ) + + # Hold on to normalization to be able to diagnose issues later + self.norm = norm[db_bins] + self.d_norm = d_norm[db_bins] + + # Clean up points where we have no direct beam + zero_db = [not v for v in db_bins] + refl[zero_db] = 0 + d_refl[zero_db] = 0 + + # Convert to Q + + self.refl = refl + self.d_refl = d_refl + + + + # Remove leading zeros + r = np.trim_zeros(self.refl, "f") + trim = len(self.refl) - len(r) + self.refl = self.refl[trim:] + self.d_refl = self.d_refl[trim:] + self.q_bins = self.q_bins[trim:] + + + + # Compute Q resolution + #self.dq_over_q = compute_resolution(self._ws_sc, theta=self.theta, q_summing=q_summing) + self.q_summing = q_summing + + return self.q_bins, self.refl, self.d_refl From 6ff023c96ca49f2ff7e49edac1ad5cc3697a71f3 Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Fri, 17 Jan 2025 12:28:35 -0500 Subject: [PATCH 2/5] good version --- reduction/lr_reduction/tof_reduction.py | 128 ++++++++++++++++-------- 1 file changed, 87 insertions(+), 41 deletions(-) diff --git a/reduction/lr_reduction/tof_reduction.py b/reduction/lr_reduction/tof_reduction.py index 8e608077..f7d699ce 100644 --- a/reduction/lr_reduction/tof_reduction.py +++ b/reduction/lr_reduction/tof_reduction.py @@ -5,11 +5,8 @@ class TOFReduction(EventReflectivity): - def __init__(self, **kwargs): - super(TOFReduction, self).__init__(**kwargs) - def _reflectivity( - self, ws, peak_position, peak, low_res, theta, wl_bins=None, sum_pixels=True, **kwargs + self, ws, _, peak, low_res, sum_pixels=True, **__ ): """ Unused parameters: @@ -17,16 +14,11 @@ def _reflectivity( """ - print(peak_position, peak, low_res, 
theta, wl_bins, sum_pixels, kwargs) charge = ws.getRun().getProtonCharge() - self.n_tof_bins = 100 - - shape = len(wl_bins) - 1 if sum_pixels else ((peak[1] - peak[0] + 1), len(wl_bins) - 1) - tof_bins = np.linspace(self.tof_range[0], self.tof_range[1], self.n_tof_bins + 1) + shape = len(self.tof_bins) - 1 if sum_pixels else ((peak[1] - peak[0] + 1), len(self.tof_bins) - 1) refl = np.zeros(shape) d_refl_sq = np.zeros(shape) - #counts = np.zeros(shape) for i in range(low_res[0], int(low_res[1] + 1)): for j in range(peak[0], int(peak[1] + 1)): @@ -44,14 +36,18 @@ def _reflectivity( event_weights = evt_list.getWeights() - _counts, _ = np.histogram(tofs, bins=tof_bins, weights=event_weights) + _counts, _ = np.histogram(tofs, bins=self.tof_bins, weights=event_weights) + _err, _ = np.histogram(tofs, bins=self.tof_bins, weights=event_weights**2) + if sum_pixels: refl += _counts + d_refl_sq += _err else: refl[j - peak[0]] += _counts + d_refl_sq[j - peak[0]] += _err - d_refl_sq = np.sqrt(np.fabs(refl)) / charge - refl /= charge + d_refl_sq = np.sqrt(d_refl_sq) / charge + refl /= charge return refl, d_refl_sq @@ -77,6 +73,12 @@ def specular(self, q_summing=False, normalize=True): d_refl The uncertainties in the reflectivity values """ + # TODO: make this a parameter + self.n_tof_bins = 260 + + tof_bins = np.linspace(self.tof_range[0], self.tof_range[1], self.n_tof_bins + 1) + self.tof_bins = tof_bins + # Scattering data refl, d_refl = self._reflectivity( self._ws_sc, @@ -84,18 +86,17 @@ def specular(self, q_summing=False, normalize=True): peak=self.signal_peak, low_res=self.signal_low_res, theta=self.theta, + sum_pixels=not q_summing ) # Remove background - if False and self.signal_bck is not None: - refl_bck, d_refl_bck = self.bck_subtraction() + if self.signal_bck is not None: + refl_bck, d_refl_bck = self.bck_subtraction(normalize_to_single_pixel=q_summing, q_summing=q_summing) refl -= refl_bck d_refl = np.sqrt(d_refl**2 + d_refl_bck**2) if normalize: - # Normalize by 
monitor - self._normalize(refl, d_refl) norm, d_norm = self._reflectivity( - self._ws_db, peak_position=0, peak=self.norm_peak, low_res=self.norm_low_res, theta=self.theta, q_summing=False + self._ws_db, peak_position=0, peak=self.norm_peak, low_res=self.norm_low_res, sum_pixels=True ) # Direct beam background could be added here. The effect will be negligible. @@ -103,28 +104,24 @@ def specular(self, q_summing=False, normalize=True): norm_bck, d_norm_bck = self.norm_bck_subtraction() norm -= norm_bck d_norm = np.sqrt(d_norm**2 + d_norm_bck**2) - db_bins = norm > 0 - refl[db_bins] = refl[db_bins] / norm[db_bins] - d_refl[db_bins] = np.sqrt( - d_refl[db_bins] ** 2 / norm[db_bins] ** 2 + refl[db_bins] ** 2 * d_norm[db_bins] ** 2 / norm[db_bins] ** 4 + refl = refl / norm + d_refl = np.sqrt( + d_refl ** 2 / norm ** 2 + refl ** 2 * d_norm ** 2 / norm ** 4 ) - # Hold on to normalization to be able to diagnose issues later - self.norm = norm[db_bins] - self.d_norm = d_norm[db_bins] - - # Clean up points where we have no direct beam - zero_db = [not v for v in db_bins] - refl[zero_db] = 0 - d_refl[zero_db] = 0 - # Convert to Q - - self.refl = refl - self.d_refl = d_refl - - + wl_list = self.tof_bins / self.constant + d_theta = self.gravity_correction(self._ws_sc, wl_list) + + if q_summing: + refl, d_refl = self.constant_q (wl_list, refl, d_refl) + self.refl = refl + self.d_refl = d_refl + else: + self.q_bins = np.flip(4.0 * np.pi / wl_list * np.sin(self.theta - d_theta)) + self.refl = np.flip(refl) + self.d_refl = np.flip(d_refl) # Remove leading zeros r = np.trim_zeros(self.refl, "f") @@ -133,10 +130,59 @@ def specular(self, q_summing=False, normalize=True): self.d_refl = self.d_refl[trim:] self.q_bins = self.q_bins[trim:] + return self.q_bins, self.refl, self.d_refl - # Compute Q resolution - #self.dq_over_q = compute_resolution(self._ws_sc, theta=self.theta, q_summing=q_summing) - self.q_summing = q_summing - - return self.q_bins, self.refl, self.d_refl + def 
constant_q(self, wl_values, signal, signal_err): + """ + Compute reflectivity using constant-Q binning + """ + pixels = np.arange(self.signal_peak[0], self.signal_peak[1]+1) + + _pixel_width = self.pixel_width + + # TODO gravity correction + x_distance = _pixel_width * (pixels - self.specular_pixel ) + delta_theta_f = np.arctan(x_distance / self.det_distance) / 2.0 + + # Sign will depend on reflect up or down + ths_value = self._ws_sc.getRun()["ths"].value[-1] + delta_theta_f *= np.sign(ths_value) + + # Calculate Qz for each pixel + LL, TT = np.meshgrid(wl_values, delta_theta_f) + qz = 4 * np.pi / LL * np.sin(self.theta + TT) * np.cos(TT) + qz = qz.T + + # We use np.digitize to bin. The output of digitize is a bin + # number for each entry, starting at 1. The first bin (0) is + # for underflow entries, and the last bin is for overflow entries. + n_q_values = len(self.q_bins) + refl = np.zeros(n_q_values - 1) + refl_err = np.zeros(n_q_values - 1) + signal_n = np.zeros(n_q_values - 1) + + # Number of wavelength bins + n_wl = qz.shape[0]-1 + # Number of pixels + n_pix = qz.shape[1] + + for tof in range(n_wl): + # When digitizing, the first and last bins are out-of-bounds values + z_inds = np.digitize(qz[tof], self.q_bins) + + # Move the indices so the valid bin numbers start at zero, + # since this is how we are going to address the output array + z_inds -= 1 + + for ix in range(n_pix): + if z_inds[ix] < n_q_values - 1 and z_inds[ix] >= 0 and not np.isnan(signal[ix][tof]): + refl[z_inds[ix]] += signal[ix][tof] + refl_err[z_inds[ix]] += signal_err[ix][tof] ** 2 + signal_n[z_inds[ix]] += 1.0 + + signal_n = np.where(signal_n > 0, signal_n, 1) + refl = float(signal.shape[0]) * refl / signal_n + refl_err = float(signal.shape[0]) * np.sqrt(refl_err) / signal_n + + return refl, refl_err From 49f5c20f2af727bc3578ddffefb149a5f9d7e7cd Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Fri, 17 Jan 2025 12:53:40 -0500 Subject: [PATCH 3/5] Clean up --- 
reduction/lr_reduction/background.py | 7 +- reduction/lr_reduction/tof_reduction.py | 101 ++++++++++++++++++------ 2 files changed, 81 insertions(+), 27 deletions(-) diff --git a/reduction/lr_reduction/background.py b/reduction/lr_reduction/background.py index 89854e6b..0514024e 100644 --- a/reduction/lr_reduction/background.py +++ b/reduction/lr_reduction/background.py @@ -16,7 +16,7 @@ def find_ranges_without_overlap(r1, r2): Range of pixels to consider r2 : list Range of pixels to exclude - + Returns ------- list @@ -166,8 +166,7 @@ def functional_background( def side_background( - ws, event_reflectivity, peak, bck, low_res, normalize_to_single_pixel=False, q_bins=None, - wl_dist=None, wl_bins=None, q_summing=False + ws, event_reflectivity, peak, bck, low_res, normalize_to_single_pixel=False, q_bins=None, wl_dist=None, wl_bins=None, q_summing=False ): """ Original background substration done using two pixels defining the @@ -195,7 +194,7 @@ def side_background( Array of wavelength bins for the case where we use weighted events for normatization q_summing : bool If True, sum the counts in Q bins - + Returns ------- numpy.ndarray diff --git a/reduction/lr_reduction/tof_reduction.py b/reduction/lr_reduction/tof_reduction.py index f7d699ce..90a34664 100644 --- a/reduction/lr_reduction/tof_reduction.py +++ b/reduction/lr_reduction/tof_reduction.py @@ -1,18 +1,56 @@ +""" +Implementation of reflectivity reduction for time-of-flight data using histograms of time-of-flight values. +""" + import numpy as np from lr_reduction.event_reduction import EventReflectivity class TOFReduction(EventReflectivity): + """ + Traditional implementation of reflectivity reduction for time-of-flight data. + As opposed to the event-based reduction, this implementation is based on + histograms of time-of-flight values. 
+ """ def _reflectivity( - self, ws, _, peak, low_res, sum_pixels=True, **__ + self, + ws, + peak_position, + peak, + low_res, + sum_pixels=True, + **__, # noqa: ARG002 ): """ - Unused parameters: - - q_bins + Overloads the _reflectivity method from EventReflectivity. + This method histograms counts into time-of-flight bins in the selected region + of interest of the detector. + + TODO: This method should be renamed to something more descriptive. + Parameters + ---------- + ws : Mantid workspace + The workspace containing the data + peak_position : int + The position of the peak, unused in this implementation + peak : list + The range of pixels that define the peak + low_res : list + The range in the transverse direction on the detector + sum_pixels : bool + If True, the counts are summed over the pixels in the peak range + Returns + ------- + tuple + A tuple containing: + - refl : numpy.ndarray + The reflectivity values + - d_refl_sq : numpy.ndarray + The uncertainties in the reflectivity values """ charge = ws.getRun().getProtonCharge() shape = len(self.tof_bins) - 1 if sum_pixels else ((peak[1] - peak[0] + 1), len(self.tof_bins) - 1) @@ -51,7 +89,7 @@ def _reflectivity( return refl, d_refl_sq - def specular(self, q_summing=False, normalize=True): + def specular(self, q_summing=False, normalize=True, number_of_bins=300): """ Compute specular reflectivity. 
@@ -63,18 +101,22 @@ def specular(self, q_summing=False, normalize=True): Turns on constant-Q binning normalize : bool If True, and tof_weighted is False, normalization will be skipped + number_of_bins : int + Number of bins for the time-of-flight axis Returns ------- - q_bins - The Q bin boundaries - refl - The reflectivity values - d_refl - The uncertainties in the reflectivity values + tuple + A tuple containing: + - q_bins : numpy.ndarray + The Q bin boundaries + - refl : numpy.ndarray + The reflectivity values + - d_refl_sq : numpy.ndarray + The uncertainties in the reflectivity values """ # TODO: make this a parameter - self.n_tof_bins = 260 + self.n_tof_bins = number_of_bins tof_bins = np.linspace(self.tof_range[0], self.tof_range[1], self.n_tof_bins + 1) self.tof_bins = tof_bins @@ -86,7 +128,7 @@ def specular(self, q_summing=False, normalize=True): peak=self.signal_peak, low_res=self.signal_low_res, theta=self.theta, - sum_pixels=not q_summing + sum_pixels=not q_summing, ) # Remove background if self.signal_bck is not None: @@ -95,9 +137,7 @@ def specular(self, q_summing=False, normalize=True): d_refl = np.sqrt(d_refl**2 + d_refl_bck**2) if normalize: - norm, d_norm = self._reflectivity( - self._ws_db, peak_position=0, peak=self.norm_peak, low_res=self.norm_low_res, sum_pixels=True - ) + norm, d_norm = self._reflectivity(self._ws_db, peak_position=0, peak=self.norm_peak, low_res=self.norm_low_res, sum_pixels=True) # Direct beam background could be added here. The effect will be negligible. 
if self.norm_bck is not None: @@ -106,16 +146,14 @@ def specular(self, q_summing=False, normalize=True): d_norm = np.sqrt(d_norm**2 + d_norm_bck**2) refl = refl / norm - d_refl = np.sqrt( - d_refl ** 2 / norm ** 2 + refl ** 2 * d_norm ** 2 / norm ** 4 - ) + d_refl = np.sqrt(d_refl**2 / norm**2 + refl**2 * d_norm**2 / norm**4) # Convert to Q wl_list = self.tof_bins / self.constant d_theta = self.gravity_correction(self._ws_sc, wl_list) if q_summing: - refl, d_refl = self.constant_q (wl_list, refl, d_refl) + refl, d_refl = self.constant_q(wl_list, refl, d_refl) self.refl = refl self.d_refl = d_refl else: @@ -132,17 +170,34 @@ def specular(self, q_summing=False, normalize=True): return self.q_bins, self.refl, self.d_refl - def constant_q(self, wl_values, signal, signal_err): """ Compute reflectivity using constant-Q binning + + Parameters + ---------- + wl_values : numpy.ndarray + The wavelength values + signal : numpy.ndarray + The normalized counts per pixel and wavelength + signal_err : numpy.ndarray + The uncertainties in the counts + + Returns + ------- + tuple + A tuple containing: + - refl : numpy.ndarray + The reflectivity values + - refl_err : numpy.ndarray + The uncertainties in the reflectivity values """ - pixels = np.arange(self.signal_peak[0], self.signal_peak[1]+1) + pixels = np.arange(self.signal_peak[0], self.signal_peak[1] + 1) _pixel_width = self.pixel_width # TODO gravity correction - x_distance = _pixel_width * (pixels - self.specular_pixel ) + x_distance = _pixel_width * (pixels - self.specular_pixel) delta_theta_f = np.arctan(x_distance / self.det_distance) / 2.0 # Sign will depend on reflect up or down @@ -163,7 +218,7 @@ def constant_q(self, wl_values, signal, signal_err): signal_n = np.zeros(n_q_values - 1) # Number of wavelength bins - n_wl = qz.shape[0]-1 + n_wl = qz.shape[0] - 1 # Number of pixels n_pix = qz.shape[1] From ad955116c6eaa9dada06b3825ec375ed69b213ae Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Tue, 21 Jan 2025 16:43:32 
-0500 Subject: [PATCH 4/5] fix background --- reduction/lr_reduction/background.py | 48 ++++++++++++++----------- reduction/lr_reduction/tof_reduction.py | 32 ++++++++++++----- 2 files changed, 52 insertions(+), 28 deletions(-) diff --git a/reduction/lr_reduction/background.py b/reduction/lr_reduction/background.py index 0514024e..ab614d04 100644 --- a/reduction/lr_reduction/background.py +++ b/reduction/lr_reduction/background.py @@ -119,48 +119,56 @@ def functional_background( pixels = np.asarray(pixels) # Loop over the Q or TOF bins and fit the background - refl_bck = np.zeros(_bck.shape[1]) - d_refl_bck = np.zeros(_bck.shape[1]) + shape = [peak[1]-peak[0]+1, _bck.shape[1]] if q_summing else _bck.shape[1] + refl_bck = np.zeros(shape) + d_refl_bck = np.zeros(shape) for i in range(_bck.shape[1]): # Use average signal for background estimate _estimate = np.mean(_bck[:, i]) linear = LinearModel() pars = linear.make_params(slope=0, intercept=_estimate) - weights = 1 / _d_bck[:, i] # Here we have counts normalized by proton charge, so if we want to - # assign an error of 1 on the counts, it should be 1/charge. + # assign an error of 1 on the counts, it should be scaled by charge. 
weights[_bck[:, i] == 0] = charge fit = linear.fit(_bck[:, i], pars, method="leastsq", x=pixels, weights=weights) slope = fit.params["slope"].value intercept = fit.params["intercept"].value - d_slope = np.sqrt(fit.covar[0][0]) - d_intercept = np.sqrt(fit.covar[1][1]) # Compute background under the peak - total_bck = 0 - total_err = 0 for k in range(peak[0], peak[1] + 1): - total_bck += intercept + k * slope - total_err += d_intercept**2 + k**2 * d_slope**2 - - _pixel_area = peak[1] - peak[0] + 1.0 - - refl_bck[i] = (slope * (peak[1] + peak[0] + 1) + 2 * intercept) * _pixel_area / 2 - d_refl_bck[i] = ( - np.sqrt(d_slope**2 * (peak[1] + peak[0] + 1) ** 2 + 4 * d_intercept**2 + 4 * (peak[1] + peak[0] + 1) * fit.covar[0][1]) - * _pixel_area - / 2 - ) + if q_summing: + refl_bck[k - peak[0]][i] = intercept + k * slope + d_refl_bck[k - peak[0]][i] = np.sqrt(fit.covar[1][1] + k**2 * fit.covar[0][0] + + 2 * k * fit.covar[0][1]) + else: + refl_bck[i] += intercept + k * slope + d_refl_bck[i] += fit.covar[1][1]**2 + k**2 * fit.covar[0][0]**2 + + #_pixel_area = peak[1] - peak[0] + 1.0 + + #refl_bck[i] = (slope * (peak[1] + peak[0] + 1) + 2 * intercept) * _pixel_area / 2 + #d_refl_bck[i] = ( + # np.sqrt(d_slope**2 * (peak[1] + peak[0] + 1) ** 2 + 4 * d_intercept**2 + 4 * (peak[1] + peak[0] + 1) * fit.covar[0][1]) + # * _pixel_area + # / 2 + #) + + if not q_summing: + d_refl_bck = np.sqrt(d_refl_bck) # In case we neen the background per pixel as opposed to the total sum under the peak - if normalize_to_single_pixel: + if normalize_to_single_pixel and not q_summing: _pixel_area = peak[1] - peak[0] + 1.0 refl_bck /= _pixel_area d_refl_bck /= _pixel_area + elif q_summing and not normalize_to_single_pixel: + _pixel_area = peak[1] - peak[0] + 1.0 + refl_bck *= _pixel_area + d_refl_bck *= _pixel_area return refl_bck, d_refl_bck diff --git a/reduction/lr_reduction/tof_reduction.py b/reduction/lr_reduction/tof_reduction.py index 90a34664..a41531ca 100644 --- 
a/reduction/lr_reduction/tof_reduction.py +++ b/reduction/lr_reduction/tof_reduction.py @@ -17,11 +17,11 @@ class TOFReduction(EventReflectivity): def _reflectivity( self, ws, - peak_position, + peak_position, # noqa: ARG002 peak, low_res, sum_pixels=True, - **__, # noqa: ARG002 + **__, ): """ Overloads the _reflectivity method from EventReflectivity. @@ -89,7 +89,7 @@ def _reflectivity( return refl, d_refl_sq - def specular(self, q_summing=False, normalize=True, number_of_bins=300): + def specular(self, q_summing=False, normalize=True, number_of_bins=100): """ Compute specular reflectivity. @@ -115,6 +115,7 @@ def specular(self, q_summing=False, normalize=True, number_of_bins=300): - d_refl_sq : numpy.ndarray The uncertainties in the reflectivity values """ + # TODO: make this a parameter self.n_tof_bins = number_of_bins @@ -132,7 +133,7 @@ def specular(self, q_summing=False, normalize=True, number_of_bins=300): ) # Remove background if self.signal_bck is not None: - refl_bck, d_refl_bck = self.bck_subtraction(normalize_to_single_pixel=q_summing, q_summing=q_summing) + refl_bck, d_refl_bck = self.bck_subtraction(normalize_to_single_pixel=True, q_summing=q_summing) refl -= refl_bck d_refl = np.sqrt(d_refl**2 + d_refl_bck**2) @@ -153,7 +154,7 @@ def specular(self, q_summing=False, normalize=True, number_of_bins=300): d_theta = self.gravity_correction(self._ws_sc, wl_list) if q_summing: - refl, d_refl = self.constant_q(wl_list, refl, d_refl) + refl, d_refl = self.constant_q(wl_list, refl, d_refl, d_theta) self.refl = refl self.d_refl = d_refl else: @@ -170,7 +171,7 @@ def specular(self, q_summing=False, normalize=True, number_of_bins=300): return self.q_bins, self.refl, self.d_refl - def constant_q(self, wl_values, signal, signal_err): + def constant_q(self, wl_values, signal, signal_err, d_theta): """ Compute reflectivity using constant-Q binning @@ -182,6 +183,8 @@ def constant_q(self, wl_values, signal, signal_err): The normalized counts per pixel and wavelength 
signal_err : numpy.ndarray The uncertainties in the counts + d_theta : numpy.ndarray + Theta offset due to gravity correction Returns ------- @@ -196,7 +199,6 @@ def constant_q(self, wl_values, signal, signal_err): _pixel_width = self.pixel_width - # TODO gravity correction x_distance = _pixel_width * (pixels - self.specular_pixel) delta_theta_f = np.arctan(x_distance / self.det_distance) / 2.0 @@ -206,7 +208,8 @@ def constant_q(self, wl_values, signal, signal_err): # Calculate Qz for each pixel LL, TT = np.meshgrid(wl_values, delta_theta_f) - qz = 4 * np.pi / LL * np.sin(self.theta + TT) * np.cos(TT) + dTT, _ = np.meshgrid(d_theta, delta_theta_f) + qz = 4 * np.pi / LL * np.sin(self.theta - dTT + TT) qz = qz.T # We use np.digitize to bin. The output of digitize is a bin @@ -240,4 +243,17 @@ def constant_q(self, wl_values, signal, signal_err): refl = float(signal.shape[0]) * refl / signal_n refl_err = float(signal.shape[0]) * np.sqrt(refl_err) / signal_n + # The following is for information purposes + x0 = _pixel_width * (self.specular_pixel - self.signal_peak[0]) + x1 = _pixel_width * (self.specular_pixel - self.signal_peak[1]) + delta_theta_f0 = np.arctan(x0 / self.det_distance) / 2.0 + delta_theta_f1 = np.arctan(x1 / self.det_distance) / 2.0 + + qz_max = 4.0 * np.pi / self.tof_range[1] * self.constant * np.fabs(np.sin(self.theta + delta_theta_f0)) + qz_min = 4.0 * np.pi / self.tof_range[1] * self.constant * np.fabs(np.sin(self.theta + delta_theta_f1)) + mid_point = (qz_max + qz_min) / 2.0 + + print("Qz range: ", mid_point) + self.summing_threshold = mid_point + return refl, refl_err From 0ed1534c7716fd059df6f4fb7b6aab8d764001be Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Tue, 21 Jan 2025 17:33:30 -0500 Subject: [PATCH 5/5] fix non-q-summing func bck --- reduction/lr_reduction/background.py | 51 +++++++++++++--------------- 1 file changed, 23 insertions(+), 28 deletions(-) diff --git a/reduction/lr_reduction/background.py 
b/reduction/lr_reduction/background.py index ab614d04..e35066a7 100644 --- a/reduction/lr_reduction/background.py +++ b/reduction/lr_reduction/background.py @@ -139,36 +139,31 @@ def functional_background( intercept = fit.params["intercept"].value # Compute background under the peak - for k in range(peak[0], peak[1] + 1): - if q_summing: - refl_bck[k - peak[0]][i] = intercept + k * slope - d_refl_bck[k - peak[0]][i] = np.sqrt(fit.covar[1][1] + k**2 * fit.covar[0][0] - + 2 * k * fit.covar[0][1]) - else: - refl_bck[i] += intercept + k * slope - d_refl_bck[i] += fit.covar[1][1]**2 + k**2 * fit.covar[0][0]**2 - - #_pixel_area = peak[1] - peak[0] + 1.0 - - #refl_bck[i] = (slope * (peak[1] + peak[0] + 1) + 2 * intercept) * _pixel_area / 2 - #d_refl_bck[i] = ( - # np.sqrt(d_slope**2 * (peak[1] + peak[0] + 1) ** 2 + 4 * d_intercept**2 + 4 * (peak[1] + peak[0] + 1) * fit.covar[0][1]) - # * _pixel_area - # / 2 - #) + if q_summing: + for k in range(peak[0], peak[1] + 1): + if q_summing: + refl_bck[k - peak[0]][i] = intercept + k * slope + d_refl_bck[k - peak[0]][i] = np.sqrt(fit.covar[1][1] + k**2 * fit.covar[0][0] + + 2 * k * fit.covar[0][1]) + if not normalize_to_single_pixel: + _pixel_area = peak[1] - peak[0] + 1.0 + refl_bck *= _pixel_area + d_refl_bck *= _pixel_area + else: + _pixel_area = peak[1] - peak[0] + 1.0 - if not q_summing: - d_refl_bck = np.sqrt(d_refl_bck) + refl_bck[i] = (slope * (peak[1] + peak[0] + 1) + 2 * intercept) * _pixel_area / 2 + d_refl_bck[i] = ( + np.sqrt(fit.covar[0][0] * (peak[1] + peak[0] + 1) ** 2 + 4 * fit.covar[1][1] + + 4 * (peak[1] + peak[0] + 1) * fit.covar[0][1] + ) * _pixel_area / 2 + ) - # In case we neen the background per pixel as opposed to the total sum under the peak - if normalize_to_single_pixel and not q_summing: - _pixel_area = peak[1] - peak[0] + 1.0 - refl_bck /= _pixel_area - d_refl_bck /= _pixel_area - elif q_summing and not normalize_to_single_pixel: - _pixel_area = peak[1] - peak[0] + 1.0 - refl_bck *= _pixel_area - 
d_refl_bck *= _pixel_area + # In case we need the background per pixel as opposed to the total sum under the peak + if normalize_to_single_pixel: + _pixel_area = peak[1] - peak[0] + 1.0 + refl_bck /= _pixel_area + d_refl_bck /= _pixel_area return refl_bck, d_refl_bck