diff --git a/AnalyzePDE.py b/AnalyzePDE.py deleted file mode 100644 index a731cd0..0000000 --- a/AnalyzePDE.py +++ /dev/null @@ -1,992 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Apr 5 15:55:20 2022 - -@author: lab-341 -""" - -import numpy as np -import lmfit as lm -import matplotlib.pyplot as plt - -# import GainCalibration_2022 as GainCalibration -import pandas as pd -from ProcessWaveforms_MultiGaussian import WaveformProcessor - - -# def CA_func(x, A, B, C): -# return (A * np.exp(B*(x)) + 1.0) / (1.0 + A) - 1.0 + C -def CA_func(x, A, B): - return (A * np.exp(B*(x)) + 1.0) / (1.0 + A) - 1.0 - - -class SPE_data: - def __init__( - self, - campaign: list[WaveformProcessor], - invC: float, - invC_err: float, - filtered: bool, - ) -> None: - """ - Initialize the SPE_data class. This class is used for collecting all the WaveformProcessor results for - an entire campaign into one object. Can plot absolute gain and Correlated Avalance (CA) as a - function of bias voltage. - - Parameters: - campaign (List[WaveformProcessor]): The campaign data. - invC (float): The inverse of the capacitance. - invC_err (float): The error in the inverse of the capacitance. - filtered (bool): A flag indicating whether the data is filtered. - - Returns: - None - """ - self.campaign = campaign - self.invC = invC - self.invC_err = invC_err - self.filtered = filtered - self.analyze_spe() - - def analyze_spe(self) -> None: - """ - Analyze the single photoelectron (SPE) data. - - This method unpacks the SPE amplitudes, bias voltages, and CA calculation from the - WaveformProcessor objects. It then performs a linear fit on the SPE values as a - function of bias voltage, and determines the breakdown voltage. It also calculates - and populates the absolute gain and overvoltage attributes. 
- - Returns: - None - """ - self.bias_vals = [] - self.bias_err = [] - self.spe_vals = [] - self.spe_err = [] - self.absolute_spe_vals = [] - self.absolute_spe_err = [] - self.CA_vals = [] - self.CA_err = [] - self.CA_rms_vals = [] - self.CA_rms_err = [] - for wp in self.campaign: - self.bias_vals.append(wp.info.bias) - self.bias_err.append(0.0025 * wp.info.bias + 0.015) # error from keysight - curr_spe = wp.get_spe() - self.spe_vals.append(curr_spe[0]) - self.spe_err.append(curr_spe[1]) - - self.bias_vals = np.array(self.bias_vals) - self.bias_err = np.array(self.bias_err) - self.spe_vals = np.array(self.spe_vals) - self.spe_err = np.array(self.spe_err) - self.absolute_spe_vals = self.spe_vals / (self.invC * 1.60217662e-7) - self.absolute_spe_err = self.absolute_spe_vals * np.sqrt( - (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) - + (self.invC_err * self.invC_err) / (self.invC * self.invC) - ) - spe_wgts = [1.0 / curr_std for curr_std in self.spe_err] - absolute_spe_wgts = [1.0 / curr_std for curr_std in self.absolute_spe_err] - - model = lm.models.LinearModel() - params = model.make_params() - self.spe_res = model.fit( - self.spe_vals, params=params, x=self.bias_vals, weights=spe_wgts - ) - self.absolute_spe_res = model.fit( - self.absolute_spe_vals, - params=params, - x=self.bias_vals, - weights=absolute_spe_wgts, - ) # linear fit - b_spe = self.spe_res.params["intercept"].value - m_spe = self.spe_res.params["slope"].value - self.v_bd = -b_spe / m_spe - - vec_spe = np.array([b_spe / (m_spe * m_spe), -1.0 / m_spe]) - # print('check ' + str(self.bias_vals)) - self.v_bd_err = np.sqrt( - np.matmul( - np.reshape(vec_spe, (1, 2)), - np.matmul(self.spe_res.covar, np.reshape(vec_spe, (2, 1))), - )[0, 0] - ) # breakdown error calculation using covariance matrix - - self.ov = [] - self.ov_err = [] - - for b, db in zip(self.bias_vals, self.bias_err): - curr_ov = b - self.v_bd - curr_ov_err = np.sqrt(db * db) - self.ov.append(curr_ov) - 
self.ov_err.append(curr_ov_err) - - # self.bias_vals.append(self.v_bd) - # self.bias_err.append(self.v_bd_err) - - # self.ov.append(0.0) - # self.ov_err.append(self.v_bd_err) - - self.ov = np.array(self.ov) - self.ov_err = np.array(self.ov_err) - - spe = self.spe_res.eval(params=self.spe_res.params, x=self.bias_vals) - spe_err = self.spe_res.eval_uncertainty(params=self.spe_res.params, x=self.bias_vals, sigma=1) - for wp, curr_bias, curr_spe, curr_spe_err in zip(self.campaign, self.bias_vals, spe, spe_err): - curr_CA_val, curr_CA_err = wp.get_CA_spe(curr_spe, curr_spe_err) - curr_CA_rms_val, curr_CA_rms_err = wp.get_CA_rms(curr_spe, curr_spe_err) - self.CA_vals.append(curr_CA_val) - self.CA_err.append(curr_CA_err) - self.CA_rms_vals.append(curr_CA_rms_val) - self.CA_rms_err.append(curr_CA_rms_err) - -#include interpolated v_bd value in CA model fit - # bias_inclusive_bd = np.append(self.bias_vals,self.v_bd) - # bias_inclusive_bd_err = np.append(self.bias_err,self.v_bd_err) - - # self.ov = np.append(self.ov, 0.0) - # self.bias_err = np.append(self.ov_err,self.v_bd_err) - # self.CA_vals.append(0.0) #no CA activity at v_bd by definition - # self.CA_err.append(self.campaign[0].get_baseline_fit().params["sigma"].value) - - # self.CA_vals = np.array(self.CA_vals) - # self.CA_err = np.array(self.CA_err) - - # self.CA_rms_vals = np.array(self.CA_rms_vals) - # self.CA_rms_err = np.array(self.CA_rms_err) - - # i am stinky - - CA_model = lm.Model(CA_func) - # CA_params = CA_model.make_params(A=1, B= 1, C = 0) - CA_params = CA_model.make_params(A=1, B=1) - # CA_params['A'].min = 0.1 - # CA_params['C'].min = -5 - # CA_params['C'].max = 5 - CA_wgts = [1.0 / curr_std for curr_std in self.CA_err] - self.CA_res = CA_model.fit( - self.CA_vals, params=CA_params, x=self.ov, weights=CA_wgts - ) - - def get_CA_ov(self, input_ov_vals: np.ndarray) -> tuple[np.ndarray, np.ndarray]: - """ - Evaluate the CA values and their uncertainties at the given overvoltage values. 
- - Parameters: - input_ov_vals (np.ndarray): The input overvoltage values. - - Returns: - Tuple[np.ndarray, np.ndarray]: A tuple containing the evaluated CA values and their uncertainties. - """ - out_vals = self.CA_res.eval(params=self.CA_res.params, x=input_ov_vals) - out_err = self.CA_res.eval_uncertainty(x=input_ov_vals, sigma=1) - return out_vals, out_err - - def get_spe_ov(self, input_ov_vals: np.ndarray) -> tuple[np.ndarray, np.ndarray]: - """ - Evaluate the SPE values and their uncertainties at the given overvoltage values. - - This method first calculates the bias values by adding the breakdown voltage to the input - overvoltage values. It then evaluates the SPE values and their uncertainties at these bias values. - - Parameters: - input_ov_vals (np.ndarray): The input overvoltage values. - - Returns: - Tuple[np.ndarray, np.ndarray]: A tuple containing the evaluated SPE values and their uncertainties. - """ - input_bias_vals = input_ov_vals + self.v_bd - out_vals = self.spe_res.eval(params=self.spe_res.params, x=input_bias_vals) - out_err = self.spe_res.eval_uncertainty(x=input_bias_vals, sigma=1) - return out_vals, out_err - - def plot_spe( - self, - in_ov: bool = False, - absolute: bool = False, - color: str = "blue", - out_file: str = None, - ) -> None: - """ - Plot the single photoelectron (SPE) data. - - This method creates a plot of the SPE data. It can plot either the absolute gain or the SPE amplitude, - and it can plot the data as a function of either the bias voltage or the overvoltage. The plot includes - the best fit line and its uncertainty, the data points with their errors, and a text box with additional - information about the data and the fit. The plot can be saved to a file. - - Parameters: - in_ov (bool, optional): If True, plot the data as a function of the overvoltage. If False, plot the data as a function of the bias voltage. Default is False. - absolute (bool, optional): If True, plot the absolute gain. 
If False, plot the SPE amplitude. Default is False. - color (str, optional): The color to use for the text box. Default is 'blue'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - - color = "tab:" + color - fig = plt.figure() - plt.tight_layout() - - start_bias = self.v_bd - end_bias = np.amax(self.bias_vals) + 1.0 - fit_bias = np.linspace(start_bias, end_bias, 20) - - if absolute: - fit_y = self.absolute_spe_res.eval( - params=self.absolute_spe_res.params, x=fit_bias - ) - fit_y_err = self.absolute_spe_res.eval_uncertainty( - x=fit_bias, params=self.absolute_spe_res.params, sigma=1 - ) - fit_label = "Absolute Gain Best Fit" - data_label = "Absolute Gain values" - y_label = "Absolute Gain" - data_y = self.absolute_spe_vals - data_y_err = self.absolute_spe_err - chi_sqr = self.absolute_spe_res.redchi - slope_text = rf"""Slope: {self.absolute_spe_res.params['slope'].value:0.4} $\pm$ {self.absolute_spe_res.params['slope'].stderr:0.2} [1/V]""" - intercept_text = rf"""Intercept: {self.absolute_spe_res.params['intercept'].value:0.4} $\pm$ {self.absolute_spe_res.params['intercept'].stderr:0.2} [V]""" - plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0)) - else: - fit_y = self.spe_res.eval(params=self.spe_res.params, x=fit_bias) - fit_y_err = self.spe_res.eval_uncertainty( - x=fit_bias, params=self.spe_res.params, sigma=1 - ) - fit_label = "SPE Amplitude Best Fit" - data_label = "SPE Amplitude values" - y_label = "SPE Amplitude [V]" - data_y = self.spe_vals - data_y_err = self.spe_err - chi_sqr = self.spe_res.redchi - slope_text = rf"""Slope: {self.spe_res.params['slope'].value:0.4} $\pm$ {self.spe_res.params['slope'].stderr:0.2} [V/V]""" - intercept_text = rf"""Intercept: {self.spe_res.params['intercept'].value:0.4} $\pm$ {self.spe_res.params['intercept'].stderr:0.2} [V]""" - - parameter_text = slope_text - if in_ov: - fit_x = np.linspace(start_bias - 
self.v_bd, end_bias - self.v_bd, 20) - data_x = self.ov - data_x_err = self.ov_err - x_label = "Overvoltage [V]" - else: - fit_x = fit_bias - data_x = self.bias_vals - data_x_err = self.bias_err - x_label = "Bias Voltage [V]" - parameter_text += f"""\n""" - parameter_text += intercept_text - - parameter_text += f"""\n""" - parameter_text += rf"""Reduced $\chi^2$: {chi_sqr:0.4}""" - parameter_text += f"""\n""" - plt.fill_between( - fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color="red", alpha=0.5 - ) - plt.plot(fit_x, fit_y, color="red", label=fit_label) - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=6, - fmt=".", - label=data_label, - ) - - plt.ylim(0) - # plt.xlim(0) - plt.xlabel(x_label, loc='right') - plt.ylabel(y_label, loc='top') - plt.legend() - - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" - if self.filtered: - textstr += f"Filtering: Lowpass, 400kHz\n" - else: - textstr += f"Filtering: None\n" - textstr += f"--\n" - textstr += parameter_text - textstr += f"--\n" - textstr += rf"Breakdown Voltage: {self.v_bd:0.4} $\pm$ {self.v_bd_err:0.3} [V]" - - props = dict(boxstyle="round", alpha=0.4) - fig.text(0.6, 0.45, textstr, fontsize=8, verticalalignment="top", bbox=props) - - if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - plt.show() - - def plot_CA(self, color: str = "blue", out_file: str = None) -> None: - """ - Plot the correlated avalanche (CA) as a function of overvoltage. - - This method creates a plot of the CA as a function of overvoltage. The plot includes - the best fit line and its uncertainty, the data points with their errors, and a text box - with additional information about the data and the fit. The plot can be saved to a file. 
- - Parameters: - color (str, optional): The color to use for the text box. Default is 'blue'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color - fig = plt.figure() - - data_x = self.ov - # data_x = self.bias_vals - data_x_err = self.bias_err - data_y = self.CA_vals - data_y_err = self.CA_err - - fit_x = np.linspace(0, np.amax(self.ov) + 1.0, num=100) - fit_y = self.CA_res.eval(params=self.CA_res.params, x=fit_x) - # fit_y = [CA_func(x, self.CA_res.params['A'].value, self.CA_res.params['B'].value, self.CA_res.params['C'].value) for x in fit_x] - # print(self.CA_res.params['A'].value) - fit_y_err = self.CA_res.eval_uncertainty( - x=fit_x, params=self.CA_res.params, sigma=1 - ) - - plt.fill_between( - fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color="deeppink", alpha=0.5 - ) - plt.plot( - fit_x, - fit_y, - color="deeppink", - label=r"$\frac{Ae^{B*V_{OV}}+1}{A + 1} - 1$ fit", - ) - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - label=r"$\frac{1}{N}\sum_{i=1}^{N}{\frac{A_i}{\bar{A}_{1 PE}}-1}$", - ) - - x_label = "Overvoltage [V]" - y_label = "Number of CA [PE]" - plt.xlabel(x_label, loc='right') - plt.ylabel(y_label, loc='top') - plt.legend(loc="upper left") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" - if self.filtered: - textstr += f"Filtering: Lowpass, 400kHz\n" - else: - textstr += f"Filtering: None\n" - textstr += f"--\n" - textstr += f"""A: {self.CA_res.params['A'].value:0.3} $\\pm$ {self.CA_res.params['A'].stderr:0.3}\n""" - textstr += f"""B: {self.CA_res.params['B'].value:0.2} $\\pm$ {self.CA_res.params['B'].stderr:0.2}\n""" - # textstr += f"""C: {self.CA_res.params['C'].value:0.2} $\pm$ {self.CA_res.params['C'].stderr:0.2}\n""" - textstr += 
rf"""Reduced $\chi^2$: {self.CA_res.redchi:0.4}""" - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.15, 0.65, textstr, fontsize=8, verticalalignment="top", bbox=props) - plt.tight_layout() - if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - else: - plt.show() - - def plot_CA_rms(self, color: str = "blue", out_file: str = None) -> None: - """ - Plot the root mean square (RMS) of the charge amplification (CA) as a function of overvoltage. - - This method creates a plot of the RMS of the CA as a function of overvoltage. The plot includes - the data points with their errors and a text box with additional information about the data. - The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the text box. Default is 'blue'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. 
- - Returns: - None - """ - color = "tab:" + color - fig = plt.figure() - - data_x = self.bias_vals - data_x_err = self.bias_err - # data_x = self.ov - # data_x_err = self.ov_err - data_y = self.CA_rms_vals - data_y_err = self.CA_rms_err - - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - label=r"$\sqrt{\frac{\sum_{i=1}^{N}\left(\frac{A_i}{\bar{A}_{1 PE}}-\left(\langle\Lambda\rangle+1\right)\right)^2}{N}}$", - ) - - x_label = "Bias voltage [V]" - y_label = "RMS CAs [PE]" - plt.xlabel(x_label) - plt.ylabel(y_label) - plt.legend(loc="upper left") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" - if self.filtered: - textstr += f'Filtering: Lowpass, 400kHz\n' - # textstr += f"Filtering: Bandpass [1E4, 1E6]\n" - else: - textstr += f"Filtering: None\n" - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.15, 0.65, textstr, fontsize=8, verticalalignment="top", bbox=props) - plt.tight_layout() - if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - else: - plt.show() - - # I HAVE A SECRET TO TELL YOU! (it was reed who wrote that message and thwy are pinning it on me) - - -# it worked. - - -class Alpha_data: - def __init__( - self, - campaign: list[WaveformProcessor], - invC: float, - invC_err: float, - spe_data: SPE_data, - v_bd: float, - v_bd_err: float, - ) -> None: - """ - Initialize the Alpha_data class. This class is used for collecting all the WaveformProcessor results for - an entire campaign into one object. Can then be used to determine PDE and the equivalent number of detected photons. - - Parameters: - campaign (List): The campaign data. - invC (float): The inverse of the capacitance. 
- invC_err (float): The error in the inverse of the capacitance. - spe_data (SPE_data): The single photoelectron data. - v_bd (float): The breakdown voltage. - v_bd_err (float): The error in the breakdown voltage. - - Returns: - None - """ - self.campaign = campaign - self.invC = invC - self.invC_err = invC_err - self.spe_data = spe_data - self.v_bd = v_bd - self.v_bd_err = v_bd_err - self.analyze_alpha() - - def analyze_alpha(self) -> None: - """ - Analyze the alpha data. - - This method calculates various parameters related to the alpha data such as bias values, - alpha values, overvoltage values, and the number of detected photons. - - Returns: - None - """ - self.bias_vals = [] - self.bias_err = [] - self.alpha_vals = [] - self.alpha_err = [] - - self.sigma_vals = [] - self.sigma_err = [] - - for wp in self.campaign: - self.bias_vals.append(wp.info.bias) - self.bias_err.append(0.0025 * wp.info.bias + 0.015) - # self.bias_err.append(0.005) - curr_alpha = wp.get_alpha() - self.alpha_vals.append(curr_alpha[0]) - self.alpha_err.append(curr_alpha[1]) - curr_sigma = wp.get_sigma() - self.sigma_vals.append(curr_sigma[0]) - self.sigma_err.append(curr_sigma[1]) - - self.bias_vals = np.array(self.bias_vals) - self.bias_err = np.array(self.bias_err) - self.alpha_vals = np.array(self.alpha_vals) - self.alpha_err = np.array(self.alpha_err) - - self.ov = [] - self.ov_err = [] - - for b, db in zip(self.bias_vals, self.bias_err): - curr_ov = b - self.v_bd - curr_ov_err = np.sqrt(db * db + self.v_bd_err * self.v_bd_err) - self.ov.append(curr_ov) - self.ov_err.append(curr_ov_err) - - self.ov = np.array(self.ov) - self.ov_err = np.array(self.ov_err) - - self.sigma_vals = np.array(self.sigma_vals) - self.sigma_err = np.array(self.sigma_err) - - self.CA_vals, self.CA_err = self.spe_data.get_CA_ov(self.ov) - self.spe_vals, self.spe_err = self.spe_data.get_spe_ov(self.ov) - print("CA Vals: " + str(self.CA_vals)) - print("SPE Vals: " + str(self.spe_vals)) - -# in p.e. 
units - self.sigma_in_spe_units = self.sigma_vals/self.spe_vals * self.spe_data.invC/self.invC - self.sigma_in_spe_units_err = self.sigma_in_spe_units * np.sqrt((self.sigma_err * self.sigma_err)/(self.sigma_vals * self.sigma_vals) - + (self.spe_err*self.spe_err)/(self.spe_vals*self.spe_vals) - + (self.invC_err*self.invC_err)/(self.invC*self.invC) - + (self.spe_data.invC_err*self.spe_data.invC_err)/(self.spe_data.invC*self.spe_data.invC) - ) - self.alpha_in_spe_units = self.alpha_vals/self.spe_vals * self.spe_data.invC/self.invC - self.alpha_in_spe_units_err = self.alpha_in_spe_units * np.sqrt((self.alpha_err * self.alpha_err)/(self.alpha_vals * self.alpha_vals) - + (self.spe_err*self.spe_err)/(self.spe_vals*self.spe_vals) - + (self.invC_err*self.invC_err)/(self.invC*self.invC) - + (self.spe_data.invC_err*self.spe_data.invC_err)/(self.spe_data.invC*self.spe_data.invC) - ) - - self.num_det_photons = ( - self.alpha_vals - * self.spe_data.invC - / (self.spe_vals * self.invC * (1.0 + self.CA_vals)) - ) - self.num_det_photons_err = self.num_det_photons * np.sqrt( - (self.alpha_err * self.alpha_err) / (self.alpha_vals * self.alpha_vals) - + (self.spe_data.invC_err * self.spe_data.invC_err) - / (self.spe_data.invC * self.spe_data.invC) - + (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) - + (self.invC_err * self.invC_err) / (self.invC * self.invC) - + (self.CA_err * self.CA_err) / (self.CA_vals * self.CA_vals) - ) - - def plot_alpha(self, color: str = "purple", units = "Volts", x = "Bias", out_file: str = None) -> None: - """ - Plot the alpha data as a function of overvoltage. - - This method creates a plot of the alpha data as a function of overvoltage. The plot includes - the data points with their errors and a text box with additional information about the data. - The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the data points. Default is 'purple'. 
- out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color - fig = plt.figure() - fig.tight_layout() - plt.rc("font", size=12) - - - if x == "OV": - data_x = self.ov - data_x_err = self.bias_err - x_label = "Overvoltage [V]" - if x == "Bias": - data_x = self.bias_vals - data_x_err = self.bias_err - x_label = "Bias voltage [V]" - - if units == "Volts": - data_y = self.alpha_vals - data_y_err = self.alpha_err - y_label = "Alpha Pulse Amplitude [V]" - if units == "PE": - data_y = self.alpha_in_spe_units - data_y_err = self.alpha_in_spe_units_err - y_label = "Alpha Amplitude [p.e.]" - - # fit_x = np.linspace(0.0, np.amax(self.ov) + 1.0, num = 100) - # fit_y = self.CA_res.eval(params=self.CA_res.params, x = fit_x) - # fit_y_err = self.CA_res.eval_uncertainty(x = fit_x, params = self.CA_res.params, sigma = 1) - - # plt.fill_between(fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color = 'red', alpha = .5) - # plt.plot(fit_x, fit_y, color = 'red', label = r'$\frac{Ae^{B*V_{OV}}+1}{A + 1} - 1$ fit') - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - ) - plt.xlabel(x_label) - plt.ylabel(y_label) - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]" - # if self.filtered: - # textstr += f'Filtering: Lowpass, 400kHz\n' - # else: - # textstr += f'Filtering: None\n' - # textstr += f'--\n' - # textstr += f'''A: {self.CA_res.params['A'].value:0.3f} $\pm$ {self.CA_res.params['A'].stderr:0.3f}\n''' - # textstr += f'''B: {self.CA_res.params['B'].value:0.2} $\pm$ {self.CA_res.params['B'].stderr:0.2}\n''' - # textstr += rf'''Reduced $\chi^2$: {self.CA_res.redchi:0.4}''' - - plt.grid(True) - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.15, 0.75, textstr, 
fontsize=12, verticalalignment="top", bbox=props) - if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - plt.show() - - - def plot_sigma(self, color: str = "purple", units = "Volts", x = "Bias", out_file: str = None) -> None: - """ - Plot the fitted alpha sigmas as a function of overvoltage. - - Parameters: - color (str, optional): The color to use for the data points. Default is 'purple'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color - fig = plt.figure() - fig.tight_layout() - plt.rc("font", size=12) - - if x == "OV": - data_x = self.ov - data_x_err = self.bias_err - x_label = "Overvoltage [V]" - if x == "Bias": - data_x = self.bias_vals - data_x_err = self.bias_err - x_label = "Bias voltage [V]" - - if units == "Volts": - data_y = self.sigma_vals - data_y_err = self.sigma_err - y_label = "Fitted Pulse Sigma [V]" - if units == "PE": - data_y = self.sigma_in_spe_units - data_y_err = self.sigma_in_spe_units_err - y_label = "Fitted Pulse Sigma [p.e.]" - - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - ) - plt.xlabel(x_label) - plt.ylabel(y_label) - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]" - # if self.filtered: - # textstr += f'Filtering: Lowpass, 400kHz\n' - # else: - # textstr += f'Filtering: None\n' - # textstr += f'--\n' - # textstr += f'''A: {self.CA_res.params['A'].value:0.3f} $\pm$ {self.CA_res.params['A'].stderr:0.3f}\n''' - # textstr += f'''B: {self.CA_res.params['B'].value:0.2} $\pm$ {self.CA_res.params['B'].stderr:0.2}\n''' - # textstr += rf'''Reduced $\chi^2$: {self.CA_res.redchi:0.4}''' - - 
plt.grid(True) - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.15, 0.75, textstr, fontsize=18, verticalalignment="top", bbox=props) - if out_file: - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - plt.show() - - def plot_num_det_photons(self, color: str = "purple", out_file: str = None) -> None: - """ - Plot the number of detected photons as a function of overvoltage. - - This method creates a plot of the number of detected photons as a function of overvoltage. - The plot includes the data points with their errors and a text box with additional information - about the data. The plot can be saved to a file. - - Parameters: - color (str, optional): The color to use for the data points. Default is 'purple'. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. - - Returns: - None - """ - color = "tab:" + color - fig = plt.figure() - - data_x = self.ov - data_x_err = self.bias_err # use error from voltage source, not over correlated error from OV - data_y = self.num_det_photons - data_y_err = self.num_det_photons_err - - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - ) - - plt.xlabel("Overvoltage [V]") - plt.ylabel("Number of Detected Photons") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]" - # if self.filtered: - # textstr += f'Filtering: Lowpass, 400kHz\n' - # else: - # textstr += f'Filtering: None\n' - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.3, 0.4, textstr, fontsize=18, verticalalignment="top", bbox=props) - plt.xlim(0, np.amax(self.ov) + 1.0) - ylow, yhigh = plt.ylim() - plt.ylim(-1, yhigh * 1.1) - 
plt.tight_layout() - plt.grid(True) - plt.show() - - if out_file: - x_label = "Overvoltage [V]" - y_label = "Number of Detected Photons" - data = { - x_label: data_x, - x_label + " error": data_x_err, - y_label: data_y, - y_label + " error": data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - - def plot_PDE( - self, - num_incident: int, - color: str = "purple", - other_data: list = None, - out_file: str = None, - legtext: str = "", - ) -> None: - """ - Plot the Photon Detection Efficiency (PDE) as a function of overvoltage. - - This method creates a plot of the PDE as a function of overvoltage. The plot includes - the data points with their errors and a text box with additional information about the data. - The plot can also include data from other sources. The plot can be saved to a file. - - Parameters: - num_incident (int): The number of incident photons. - color (str, optional): The color to use for the data points. Default is 'purple'. - other_data (List, optional): A list of other data to include in the plot. Each item in the list should be a dictionary with keys 'ov', 'pde', 'ov_err', 'pde_err', and 'label'. Default is None. - out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. 
- - Returns: - None - """ - color = "tab:" + color - fig = plt.figure() - - data_x = self.ov - data_x_err = self.ov_err - data_y = self.num_det_photons / num_incident - data_y_err = self.num_det_photons_err / num_incident - - plt.errorbar( - data_x, - data_y, - xerr=data_x_err, - yerr=data_y_err, - markersize=10, - fmt=".", - color=color, - label="UMass, 175nm, 190K", - ) - - if other_data: - for od in other_data: - plt.errorbar( - od.ov, - od.pde, - xerr=od.ov_err, - yerr=od.pde_err, - markersize=10, - fmt=".", - label=od.label, - ) - - plt.xlabel("Overvoltage [V]") - plt.ylabel("Photon Detection Efficiency") - textstr = f"Date: {self.campaign[0].info.date}\n" - textstr += f"Condition: {self.campaign[0].info.condition}\n" - textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" - textstr += f"PTE: {legtext}" - - props = dict(boxstyle="round", facecolor=color, alpha=0.4) - fig.text(0.3, 0.4, textstr, fontsize=18, verticalalignment="top", bbox=props) - plt.xlim(0, np.amax(self.ov) + 1.0) - ylow, yhigh = plt.ylim() - plt.ylim(-0.01, yhigh * 1.1) - if other_data: - plt.legend(loc="lower left") - plt.tight_layout() - plt.grid(True) - plt.show() - if out_file: - data = { - 'ov': data_x, 'ov error': data_x_err, - 'bias': self.bias_vals, 'bias error': self.bias_err, - 'amps': self.alpha_vals, 'amps error': self.alpha_err, - 'num_det': self.num_det_photons, 'num_det error': self.num_det_photons_err, - 'pde': data_y, 'pde error': data_y_err, - } - df = pd.DataFrame(data) - df.to_csv(out_file) - - -class Collab_PDE: - """ - A class used to represent the Photon Detection Efficiency (PDE) data from a collaboration. - - Attributes: - filename (str): The name of the file containing the PDE data. - groupname (str): The name of the group that collected the data. - wavelength (float): The wavelength of the photons used in the experiment. - temp (float): The temperature at which the data was collected. - df (DataFrame): A pandas DataFrame containing the data from the file. 
- ov (np.array): An array of overvoltage values. - ov_err (np.array): An array of errors in the overvoltage values. - pde (np.array): An array of PDE values. - pde_err (np.array): An array of errors in the PDE values. - """ - - def __init__( - self, filename: str, groupname: str, wavelength: float, temp: float - ) -> None: - """ - Initialize a Collab_PDE object. - - Parameters: - filename (str): The name of the file containing the PDE data. - groupname (str): The name of the group that collected the data. - wavelength (float): The wavelength of the photons used in the experiment. - temp (float): The temperature at which the data was collected. - - Returns: - None - """ - self.filename = filename - self.groupname = groupname - self.wavelength = wavelength - self.temp = temp - self.df = pd.read_csv(self.filename) - self.ov = np.array(self.df["OV"]) - self.ov_err = np.array(self.df["OV error"]) - self.pde = np.array(self.df["PDE"]) / 100.0 - self.pde_err = np.array(self.df["PDE error"]) / 100.0 - - -# %% -class multi_campaign: # class to compile multiple campaigns - def __init__( - self, campaigns: list, invC: float, invC_err: float, filtered: bool - ) -> None: - """ - Initialize a multi_campaign object. - - Parameters: - campaigns (List): A list of campaigns to compile. - invC (float): The inverse of the capacitance of the device. - invC_err (float): The error in the inverse of the capacitance. - filtered (bool): Whether the data has been filtered. - - Returns: - None - """ - self.campaigns = campaigns - self.invC = invC - self.invC_err = invC_err - self.filtered = filtered - self.create_SPEs() - - def create_SPEs( - self, - ) -> None: # does SPE_data on all the campaigns and returns a list of objects - """ - Create SPE_data objects for all the campaigns. - - This method creates an SPE_data object for each campaign in the list of campaigns and stores them in the data attribute. 
- - Parameters: - None - - Returns: - None - """ - self.data = [] - for curr_campaign in self.campaigns: - self.data.append( - SPE_data( - curr_campaign, invC_spe_filter, invC_spe_err_filter, filtered=True - ) - ) - - -# %% -# ihep_pde = Collab_PDE('C:/Users/lab-341/Desktop/Analysis/fresh_start/PDE_175nm_HD3_iHEP_233K.csv', 'IHEP', 175, 233) -# triumf_pde = Collab_PDE('C:/Users/lab-341/Desktop/Analysis/fresh_start/PDE_176nm_HD3_Triumf_163K.csv', 'TRIUMF', 176, 163) diff --git a/src/AnalysisDefinitions.py b/src/AnalysisDefinitions.py new file mode 100644 index 0000000..c0c7c68 --- /dev/null +++ b/src/AnalysisDefinitions.py @@ -0,0 +1,255 @@ +import numpy as np +import matplotlib.pyplot as plt +import os +from ProcessWaveforms import ProcessWaveforms +from MeasurementInfo import MeasurementInfo +from ProcessHistograms import ProcessHist +from AnalyzePDE import SPE_data +import h5py +import time + +class AnalysisUltimate: + def __init__(self, + h5_folder_path: str, + ): + """ + Creates an AnalysisUltimate obejct for performing SPE analysis. + + Args: + h5_folder_path (str): the path to the folder that holds all the hdf5 datafiles (and nothing else). Must end in '/'. 
+ """ + self.folder_path = h5_folder_path + self.files = os.listdir(self.folder_path) + self.measurementinfos = {} # keys are hdf5 file names, data is a ProcessWaveforms object + self.biases = {} # keys are hdf5 file names, data is bias voltage + for name in self.files: + f = h5py.File(self.folder_path+name,'r') # get hdf5 file + group_names = list(f['RunData'].keys()) + self.biases[name] = f['RunData'].get(group_names[0]).attrs['Bias(V)'] + self.files = {f:self.biases[f] for f in self.files} + self.biases = {self.biases[f]:f for f in self.biases} + + def args_processwaveforms(self, + acquisition: dict[str] | None = None, + do_filter: dict | bool = False, + plot_waveforms: dict | bool = False, + upper_limit: dict | float = -1, + baseline_correct: dict | bool = False, + poly_correct: dict | bool = False, + prominence: dict | float = None, + fourier: dict | bool = False, + condition: dict | str = "unspecified medium (GN/LXe/Vacuum)", + num_waveforms: dict | float = 0, + ): + """ + Initialize the arguments for Process waveform data from hdf5 file to find peaks. + + Apply filtering and baseline correciton to waveforms. Can process alpha pulse waveforms, + SPE waveforms, or baseline data. Records alpha pulse heights, SPE pulse heights, or + aggregate all ampltiudes respectively. Optionally plots waveforms to inspect data for + tuning peak finding. + + All arguments should be dictionaries with biases as keys, specifying any non-default parameters. + Arguments that are not given as dictionaries will be applied to all, including any pre-breakdown. + + Once run, these can be freely inspected and edited via the attribute self.processwaveforms_arguments. + + To create the MeasurementInfo objects for each bias, run the do_processwaveforms method. + + Args: + f (list): list of h5 file names + acquisition (str, optional): specified file name. Defaults to 'placeholder'. + do_filter (bool, optional): activates butterworth lowpass filter if True. Defaults to False. 
+ plot_waveforms (bool, optional): plots waveforms if True. Defaults to False. + upper_limit (float, optional): amplitude threshold for discarding waveforms. Set to -1 for automatic setting. Defaults to -1. + baseline_correct (bool, optional): baseline corrects waveforms if True. Defaults to False. + prominence (float, optional): parameter used for peak finding algo. None will run auto_prominence. Defaults to None. + fourier (bool, optional): if True performs fourier frequency subtraction. Defaults to False. + num_waveforms: (float, optional): number of oscilloscope traces to read in before stopping; default 0 reads everything + """ + self.processwaveforms_arguments = {'acquisition': acquisition, + 'do_filter': do_filter, + 'plot_waveforms': plot_waveforms, + 'upper_limit': upper_limit, + 'baseline_correct': baseline_correct, + 'poly_correct': poly_correct, + 'prominence': prominence, + 'fourier': fourier, + 'condition': condition, + 'num_waveforms': num_waveforms, + } + for a in self.processwaveforms_arguments: + if type(self.processwaveforms_arguments[a]) is not dict: + self.processwaveforms_arguments[a] = {v: self.processwaveforms_arguments[a] for v in self.biases} + for v in self.biases: + if v not in self.processwaveforms_arguments[a]: + self.processwaveforms_arguments[a][v] = 'not set' + + def do_processwaveforms(self, + temperature: float, + biases: list[float] | None = None, + timeit: bool = False, + ): + """ + Create and run temporary ProcessWaveform objects on the given biases using the arguments created in self.args_processwaveforms. + MeasurementInfo objects are created and stored in self.measurementinfos as a dictionary, keyed by bias. + + Args: + temperature (float): temperature of the experiment + biases (list of floats, optional): which biases to evaluate or reevaluate. Does not automatically include the breakdown. None does all given biases. Defaults to None. + timeit (bool, optional): time how long this method takes. Defaults to False. 
+ """ + if timeit: + t0 = time.time() + if not biases: + biases = list(self.biases) + + if not set(biases).issubset(set(list(self.biases))): + raise Exception("biases must either be a subset of self.biases's keys or be None") + + + runs = {} # keys are bias voltages, data is ProcessWaveforms object + for bias in biases: + f = self.folder_path + self.biases[bias] + warg = {k: self.processwaveforms_arguments[k][bias] for k in self.processwaveforms_arguments if self.processwaveforms_arguments[k][bias] != 'not set'} + if bias < 25: + warg['is_pre_bd'] = True + bias = 'prebreakdown' + runs[bias] = ProcessWaveforms([f],**warg) + if 'prebreakdown' not in runs: + runs['prebreakdown'] = None + for bias in biases: + if bias > 25: + self.measurementinfos[bias] = MeasurementInfo(self.processwaveforms_arguments['condition'][bias],temperature,runs[bias],runs['prebreakdown']) + if timeit: + t1 = time.time() + print(f'do_processwaveforms took {t1-t0:0.4} seconds to complete.') + def guess_pe_locations(self, + get_values: bool = False, + show_plots: bool = True, + ) -> dict | None: + """ + Creates an estimation for the 1PE amplitude for use in ProcessHist via taking the mode. + These are not stored. + + Args: + get_values (bool, optional): do you want to return the PE values? If so, it will be in a dictionary keyed by bias. Defaults to False. + show_plots (bool, optional): do you want to see the histograms? Defaults to True. 
+ """ + maxes = {} + figs = {} + axes = {} + for bias in self.biases: + if bias < 25: + continue + m = self.measurementinfos[bias] + figs[bias],axes[bias] = plt.subplots(1,1) + n,bins,_ = axes[bias].hist(m.all_peaks,bins = 1000,color='blue') + maxes[bias] = [bins[np.argmax(n)] * i for i in range(1,10)] + [axes[bias].axvline(x=v,color='red') for v in maxes[bias]] + axes[bias].set_title(f'{bias} V bias') + if show_plots: + plt.show() + [plt.close(figs[v]) for v in figs] + if get_values: + return maxes + + + def args_processhistograms(self, + centers: dict[list[float]], + baseline_correct: dict[bool] = False, + cutoff: dict[tuple[float,float]] = (0,np.inf), + peak_range: dict[int] = 4, + background_linear: dict[bool] = True, + peaks: dict[str] = 'all' + ): + """ + Process and histogram the identified peaks and fit with a multi-gaussian to extract the + SPE amplitude. + + Once run, these can be freely inspected and edited via the attribute self.processhsit_arguments. + + To create the ProcessHist objects, run self.create_processhistograms. + + Args: + centers (list[float]): Initial guesses for centroid of each gaussian. + baseline_correct (bool,optional): Boolean value indicating if baseline correction needs to be applied. Defaults to False. + cutoff (tuple[float, float],optional): Low and high cutoff values. Defaults to (0,np.inf). + peak_range (int,optional): The number of peaks you want to fit. Defaults to 4. + background_linear (bool,optional): If to fit a linear or exponential background to the multi-gaussian. Defaults to True. + peaks (str): Which peaks to include, either all, LED, or dark. 
Defaults to all + """ + self.processhist_arguments = {'info':self.measurementinfos, + 'centers': centers, + 'baseline_correct': baseline_correct, + 'cutoff': cutoff, + 'peak_range': peak_range, + 'background_linear': background_linear, + 'peaks': peaks} + for a in self.processhist_arguments: + if type(self.processhist_arguments[a]) is not dict: + self.processhist_arguments[a] = {v: self.processhist_arguments[a] for v in self.biases} + for v in self.biases: + if v not in self.processhist_arguments[a] and v > 25: + self.processhist_arguments[a][v] = 'not set' + + def create_processhistograms(self): + """ + Create ProcessHist objects on the given biases using the arguments created in args_processhistograms. + These are stored in self.histograms as a dictionary, keyed by bias. They can be called and run manually, + or they can be run automatically via self.do_processhistograms. + + Does not create a ProcessHist for pre-breakdown voltages + """ + self.histograms = {} + for bias in self.biases: + if bias < 25: + continue + warg = {k: self.processhist_arguments[k][bias] for k in self.processhist_arguments if self.processhist_arguments[k][bias] != 'not set'} + self.histograms[bias] = ProcessHist(**warg) + + def do_processhistograms(self, + biases: list[float] | None = None, + ): + """ + Run the ProcessHist.process_spe for the specified biases. + + Args: + biases (list of floats or None, optional): which biases to evaluate or reevaluate. Automatically excludes pre-breakdown data. None does all biases. Defaults to None. 
+ """ + if not biases: + biases = list(self.biases) + + if not set(biases).issubset(set(list(self.biases))): + raise Exception("biases must either be a subset of self.biases's keys or be None") + + for bias in biases: + if bias < 25: + continue + self.histograms[bias].process_spe() + + def do_spe_data(self, + invC: float, + invC_err: float, + filtered: bool, + do_CA: bool = False, + biases: list[float] = None, + ): + """ + Create and run the SPE_data for the current ProcessHists in self.histograms. The class is stored in self.spe_data + + Args: + invC (float): The inverse of the capacitance. + invC_err (float): The error in the inverse of the capacitance. + filtered (bool): A flag indicating whether the data is filtered. + do_CA (bool, optional): A flag for whether the class should attempt CA calculation. + biases (list of floats or None, optional): which biases to include. None does all given biases. Defaults to None. + """ + if not biases: + biases = list(self.biases) + + if not (biases <= list(self.biases)): + raise Exception("biases must either be a subset of self.biases's keys or be None") + + campaign = [self.histograms[v] for v in self.histograms] + self.spe_data = SPE_data(campaign,invC,invC_err,filtered,do_CA) \ No newline at end of file diff --git a/src/AnalyzePDE.py b/src/AnalyzePDE.py new file mode 100644 index 0000000..6e12a7d --- /dev/null +++ b/src/AnalyzePDE.py @@ -0,0 +1,477 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Apr 5 15:55:20 2022 + +@author: lab-341 +""" + +import numpy as np +import lmfit as lm +import matplotlib.pyplot as plt + +# import GainCalibration_2022 as GainCalibration +import pandas as pd +from ProcessHistograms import ProcessHist + + +# def CA_func(x, A, B, C): +# return (A * np.exp(B*(x)) + 1.0) / (1.0 + A) - 1.0 + C +def CA_func(x, A, B): + return (A * np.exp(B*(x)) + 1.0) / (1.0 + A) - 1.0 + + +class SPE_data: + def __init__( + self, + campaign: list[ProcessHist], + invC: float, + invC_err: float, + filtered: bool, + 
do_CA: bool = False,
+    ) -> None:
+        """
+        Initialize the SPE_data class. This class is used for collecting all the ProcessHist results for
+        an entire campaign into one object. Can plot absolute gain and Correlated Avalanche (CA) as a
+        function of bias voltage.
+
+        Args:
+            campaign (list[ProcessHist]): The campaign data.
+            invC (float): The inverse of the capacitance.
+            invC_err (float): The error in the inverse of the capacitance.
+            filtered (bool): A flag indicating whether the data is filtered.
+            do_CA (bool, optional): A flag for whether the class should attempt CA calculation.
+
+        Returns:
+            None
+        """
+        self.campaign = campaign
+        self.invC = invC
+        self.invC_err = invC_err
+        self.filtered = filtered
+        self.do_CA = do_CA
+        self.analyze_spe()
+
+    def analyze_spe(self) -> None:
+        """
+        Analyze the single photoelectron (SPE) data.
+
+        This method unpacks the SPE amplitudes, bias voltages, and CA calculation from the
+        ProcessHist objects. It then performs a linear fit on the SPE values as a
+        function of bias voltage, and determines the breakdown voltage. It also calculates
+        and populates the absolute gain and overvoltage attributes. 
+ + Returns: + None + """ + self.bias_vals = [] + self.bias_err = [] + self.spe_vals = [] + self.spe_err = [] + self.absolute_spe_vals = [] + self.absolute_spe_err = [] + self.CA_vals = [] + self.CA_err = [] + self.CA_rms_vals = [] + self.CA_rms_err = [] + for wp in self.campaign: + self.bias_vals.append(wp.info.bias) + self.bias_err.append(0.0025 * wp.info.bias + 0.015) # error from keysight + curr_spe = wp.get_spe() + self.spe_vals.append(curr_spe[0]) + self.spe_err.append(curr_spe[1]) + + self.bias_vals = np.array(self.bias_vals) + self.bias_err = np.array(self.bias_err) + self.spe_vals = np.array(self.spe_vals) + self.spe_err = np.array(self.spe_err) + self.absolute_spe_vals = self.spe_vals / (self.invC * 1.60217662e-7) + self.absolute_spe_err = self.absolute_spe_vals * np.sqrt( + (self.spe_err * self.spe_err) / (self.spe_vals * self.spe_vals) + + (self.invC_err * self.invC_err) / (self.invC * self.invC) + ) + spe_wgts = [1.0 / curr_std for curr_std in self.spe_err] + absolute_spe_wgts = [1.0 / curr_std for curr_std in self.absolute_spe_err] + + model = lm.models.LinearModel() + params = model.make_params() + self.spe_res = model.fit( + self.spe_vals, params=params, x=self.bias_vals, weights=spe_wgts + ) + self.absolute_spe_res = model.fit( + self.absolute_spe_vals, + params=params, + x=self.bias_vals, + weights=absolute_spe_wgts, + ) # linear fit + b_spe = self.spe_res.params["intercept"].value + m_spe = self.spe_res.params["slope"].value + self.v_bd = -b_spe / m_spe + + vec_spe = np.array([b_spe / (m_spe * m_spe), -1.0 / m_spe]) + # print('check ' + str(self.bias_vals)) + self.v_bd_err = np.sqrt( + np.matmul( + np.reshape(vec_spe, (1, 2)), + np.matmul(self.spe_res.covar, np.reshape(vec_spe, (2, 1))), + )[0, 0] + ) # breakdown error calculation using covariance matrix + + self.ov = [] + self.ov_err = [] + + for b, db in zip(self.bias_vals, self.bias_err): + curr_ov = b - self.v_bd + curr_ov_err = np.sqrt(db * db) + self.ov.append(curr_ov) + 
self.ov_err.append(curr_ov_err) + + # self.bias_vals.append(self.v_bd) + # self.bias_err.append(self.v_bd_err) + + # self.ov.append(0.0) + # self.ov_err.append(self.v_bd_err) + + self.ov = np.array(self.ov) + self.ov_err = np.array(self.ov_err) + + spe = self.spe_res.eval(params=self.spe_res.params, x=self.bias_vals) + spe_err = self.spe_res.eval_uncertainty(params=self.spe_res.params, x=self.bias_vals, sigma=1) + for wp, curr_bias, curr_spe, curr_spe_err in zip(self.campaign, self.bias_vals, spe, spe_err): + curr_CA_val, curr_CA_err = wp.get_CA_spe(curr_spe, curr_spe_err) + curr_CA_rms_val, curr_CA_rms_err = wp.get_CA_rms(curr_spe, curr_spe_err) + self.CA_vals.append(curr_CA_val) + self.CA_err.append(curr_CA_err) + self.CA_rms_vals.append(curr_CA_rms_val) + self.CA_rms_err.append(curr_CA_rms_err) + +#include interpolated v_bd value in CA model fit + # bias_inclusive_bd = np.append(self.bias_vals,self.v_bd) + # bias_inclusive_bd_err = np.append(self.bias_err,self.v_bd_err) + + # self.ov = np.append(self.ov, 0.0) + # self.bias_err = np.append(self.ov_err,self.v_bd_err) + # self.CA_vals.append(0.0) #no CA activity at v_bd by definition + # self.CA_err.append(self.campaign[0].get_baseline_fit().params["sigma"].value) + + # self.CA_vals = np.array(self.CA_vals) + # self.CA_err = np.array(self.CA_err) + + # self.CA_rms_vals = np.array(self.CA_rms_vals) + # self.CA_rms_err = np.array(self.CA_rms_err) + + if self.do_CA: + CA_model = lm.Model(CA_func) + # CA_params = CA_model.make_params(A=1, B= 1, C = 0) + CA_params = CA_model.make_params(A=1, B=1) + # CA_params['A'].min = 0.1 + # CA_params['C'].min = -5 + # CA_params['C'].max = 5 + CA_wgts = [1.0 / curr_std for curr_std in self.CA_err] + self.CA_res = CA_model.fit( + self.CA_vals, params=CA_params, x=self.ov, weights=CA_wgts + ) + + def get_CA_ov(self, input_ov_vals: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """ + Evaluate the CA values and their uncertainties at the given overvoltage values. 
+ + Parameters: + input_ov_vals (np.ndarray): The input overvoltage values. + + Returns: + Tuple[np.ndarray, np.ndarray]: A tuple containing the evaluated CA values and their uncertainties. + """ + out_vals = self.CA_res.eval(params=self.CA_res.params, x=input_ov_vals) + out_err = self.CA_res.eval_uncertainty(x=input_ov_vals, sigma=1) + return out_vals, out_err + + def get_spe_ov(self, input_ov_vals: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """ + Evaluate the SPE values and their uncertainties at the given overvoltage values. + + This method first calculates the bias values by adding the breakdown voltage to the input + overvoltage values. It then evaluates the SPE values and their uncertainties at these bias values. + + Parameters: + input_ov_vals (np.ndarray): The input overvoltage values. + + Returns: + Tuple[np.ndarray, np.ndarray]: A tuple containing the evaluated SPE values and their uncertainties. + """ + input_bias_vals = input_ov_vals + self.v_bd + out_vals = self.spe_res.eval(params=self.spe_res.params, x=input_bias_vals) + out_err = self.spe_res.eval_uncertainty(x=input_bias_vals, sigma=1) + return out_vals, out_err + + def plot_spe( + self, + in_ov: bool = False, + absolute: bool = False, + color: str = "blue", + out_file: str = None, + ) -> None: + """ + Plot the single photoelectron (SPE) data. + + This method creates a plot of the SPE data. It can plot either the absolute gain or the SPE amplitude, + and it can plot the data as a function of either the bias voltage or the overvoltage. The plot includes + the best fit line and its uncertainty, the data points with their errors, and a text box with additional + information about the data and the fit. The plot can be saved to a file. + + Args: + in_ov (bool, optional): If True, plot the data as a function of the overvoltage. If False, plot the data as a function of the bias voltage. Default is False. + absolute (bool, optional): If True, plot the absolute gain. If False, plot the SPE amplitude. 
Default is False. + color (str, optional): The color to use for the text box. Default is 'blue'. + out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. + + Returns: + None + """ + + color = "tab:" + color + fig = plt.figure() + plt.tight_layout() + + start_bias = self.v_bd + end_bias = np.amax(self.bias_vals) + 1.0 + fit_bias = np.linspace(start_bias, end_bias, 20) + + if absolute: + fit_y = self.absolute_spe_res.eval( + params=self.absolute_spe_res.params, x=fit_bias + ) + fit_y_err = self.absolute_spe_res.eval_uncertainty( + x=fit_bias, params=self.absolute_spe_res.params, sigma=1 + ) + fit_label = "Absolute Gain Best Fit" + data_label = "Absolute Gain values" + y_label = "Absolute Gain" + data_y = self.absolute_spe_vals + data_y_err = self.absolute_spe_err + chi_sqr = self.absolute_spe_res.redchi + slope_text = rf"""Slope: {self.absolute_spe_res.params['slope'].value:0.4} $\pm$ {self.absolute_spe_res.params['slope'].stderr:0.2} [1/V]""" + intercept_text = rf"""Intercept: {self.absolute_spe_res.params['intercept'].value:0.4} $\pm$ {self.absolute_spe_res.params['intercept'].stderr:0.2} [V]""" + plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0)) + else: + fit_y = self.spe_res.eval(params=self.spe_res.params, x=fit_bias) + fit_y_err = self.spe_res.eval_uncertainty( + x=fit_bias, params=self.spe_res.params, sigma=1 + ) + fit_label = "SPE Amplitude Best Fit" + data_label = "SPE Amplitude values" + y_label = "SPE Amplitude [V]" + data_y = self.spe_vals + data_y_err = self.spe_err + chi_sqr = self.spe_res.redchi + slope_text = rf"""Slope: {self.spe_res.params['slope'].value:0.4} $\pm$ {self.spe_res.params['slope'].stderr:0.2} [V/V]""" + intercept_text = rf"""Intercept: {self.spe_res.params['intercept'].value:0.4} $\pm$ {self.spe_res.params['intercept'].stderr:0.2} [V]""" + + parameter_text = slope_text + if in_ov: + fit_x = np.linspace(start_bias - self.v_bd, end_bias - self.v_bd, 20) + 
data_x = self.ov + data_x_err = self.ov_err + x_label = "Overvoltage [V]" + else: + fit_x = fit_bias + data_x = self.bias_vals + data_x_err = self.bias_err + x_label = "Bias Voltage [V]" + parameter_text += f"""\n""" + parameter_text += intercept_text + + parameter_text += f"""\n""" + parameter_text += rf"""Reduced $\chi^2$: {chi_sqr:0.4}""" + parameter_text += f"""\n""" + plt.fill_between( + fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color="red", alpha=0.5 + ) + plt.plot(fit_x, fit_y, color="red", label=fit_label) + plt.errorbar( + data_x, + data_y, + xerr=data_x_err, + yerr=data_y_err, + markersize=6, + fmt=".", + label=data_label, + ) + + plt.ylim(0) + # plt.xlim(0) + plt.xlabel(x_label, loc='right') + plt.ylabel(y_label, loc='top') + plt.legend() + + textstr = f"Date: {self.campaign[0].info.date}\n" + textstr += f"Condition: {self.campaign[0].info.condition}\n" + textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" + if self.filtered: + textstr += f"Filtering: Lowpass, 400kHz\n" + else: + textstr += f"Filtering: None\n" + textstr += f"--\n" + textstr += parameter_text + textstr += f"--\n" + textstr += rf"Breakdown Voltage: {self.v_bd:0.4} $\pm$ {self.v_bd_err:0.3} [V]" + + props = dict(boxstyle="round", alpha=0.4) + fig.text(0.6, 0.45, textstr, fontsize=8, verticalalignment="top", bbox=props) + + if out_file: + data = { + x_label: data_x, + x_label + " error": data_x_err, + y_label: data_y, + y_label + " error": data_y_err, + } + df = pd.DataFrame(data) + df.to_csv(out_file) + plt.show() + + def plot_CA(self, color: str = "blue", out_file: str = None) -> None: + """ + Plot the correlated avalanche (CA) as a function of overvoltage. + + This method creates a plot of the CA as a function of overvoltage. The plot includes + the best fit line and its uncertainty, the data points with their errors, and a text box + with additional information about the data and the fit. The plot can be saved to a file. 
+ + Parameters: + color (str, optional): The color to use for the text box. Default is 'blue'. + out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. + + Returns: + None + """ + color = "tab:" + color + fig = plt.figure() + + data_x = self.ov + # data_x = self.bias_vals + data_x_err = self.bias_err + data_y = self.CA_vals + data_y_err = self.CA_err + + fit_x = np.linspace(0, np.amax(self.ov) + 1.0, num=100) + fit_y = self.CA_res.eval(params=self.CA_res.params, x=fit_x) + # fit_y = [CA_func(x, self.CA_res.params['A'].value, self.CA_res.params['B'].value, self.CA_res.params['C'].value) for x in fit_x] + # print(self.CA_res.params['A'].value) + fit_y_err = self.CA_res.eval_uncertainty( + x=fit_x, params=self.CA_res.params, sigma=1 + ) + + plt.fill_between( + fit_x, fit_y + fit_y_err, fit_y - fit_y_err, color="deeppink", alpha=0.5 + ) + plt.plot( + fit_x, + fit_y, + color="deeppink", + label=r"$\frac{Ae^{B*V_{OV}}+1}{A + 1} - 1$ fit", + ) + plt.errorbar( + data_x, + data_y, + xerr=data_x_err, + yerr=data_y_err, + markersize=10, + fmt=".", + label=r"$\frac{1}{N}\sum_{i=1}^{N}{\frac{A_i}{\bar{A}_{1 PE}}-1}$", + ) + + x_label = "Overvoltage [V]" + y_label = "Number of CA [PE]" + plt.xlabel(x_label, loc='right') + plt.ylabel(y_label, loc='top') + plt.legend(loc="upper left") + textstr = f"Date: {self.campaign[0].info.date}\n" + textstr += f"Condition: {self.campaign[0].info.condition}\n" + textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" + if self.filtered: + textstr += f"Filtering: Lowpass, 400kHz\n" + else: + textstr += f"Filtering: None\n" + textstr += f"--\n" + textstr += f"""A: {self.CA_res.params['A'].value:0.3} $\\pm$ {self.CA_res.params['A'].stderr:0.3}\n""" + textstr += f"""B: {self.CA_res.params['B'].value:0.2} $\\pm$ {self.CA_res.params['B'].stderr:0.2}\n""" + # textstr += f"""C: {self.CA_res.params['C'].value:0.2} $\pm$ {self.CA_res.params['C'].stderr:0.2}\n""" + textstr += 
rf"""Reduced $\chi^2$: {self.CA_res.redchi:0.4}""" + props = dict(boxstyle="round", facecolor=color, alpha=0.4) + fig.text(0.15, 0.65, textstr, fontsize=8, verticalalignment="top", bbox=props) + plt.tight_layout() + if out_file: + data = { + x_label: data_x, + x_label + " error": data_x_err, + y_label: data_y, + y_label + " error": data_y_err, + } + df = pd.DataFrame(data) + df.to_csv(out_file) + else: + plt.show() + + def plot_CA_rms(self, color: str = "blue", out_file: str = None) -> None: + """ + Plot the root mean square (RMS) of the charge amplification (CA) as a function of overvoltage. + + This method creates a plot of the RMS of the CA as a function of overvoltage. The plot includes + the data points with their errors and a text box with additional information about the data. + The plot can be saved to a file. + + Parameters: + color (str, optional): The color to use for the text box. Default is 'blue'. + out_file (str, optional): The name of the file to save the plot to. If None, the plot is not saved to a file. Default is None. 
+ + Returns: + None + """ + color = "tab:" + color + fig = plt.figure() + + data_x = self.bias_vals + data_x_err = self.bias_err + # data_x = self.ov + # data_x_err = self.ov_err + data_y = self.CA_rms_vals + data_y_err = self.CA_rms_err + + plt.errorbar( + data_x, + data_y, + xerr=data_x_err, + yerr=data_y_err, + markersize=10, + fmt=".", + label=r"$\sqrt{\frac{\sum_{i=1}^{N}\left(\frac{A_i}{\bar{A}_{1 PE}}-\left(\langle\Lambda\rangle+1\right)\right)^2}{N}}$", + ) + + x_label = "Bias voltage [V]" + y_label = "RMS CAs [PE]" + plt.xlabel(x_label) + plt.ylabel(y_label) + plt.legend(loc="upper left") + textstr = f"Date: {self.campaign[0].info.date}\n" + textstr += f"Condition: {self.campaign[0].info.condition}\n" + textstr += f"RTD4: {self.campaign[0].info.temperature} [K]\n" + if self.filtered: + textstr += f'Filtering: Lowpass, 400kHz\n' + # textstr += f"Filtering: Bandpass [1E4, 1E6]\n" + else: + textstr += f"Filtering: None\n" + + props = dict(boxstyle="round", facecolor=color, alpha=0.4) + fig.text(0.15, 0.65, textstr, fontsize=8, verticalalignment="top", bbox=props) + plt.tight_layout() + if out_file: + data = { + x_label: data_x, + x_label + " error": data_x_err, + y_label: data_y, + y_label + " error": data_y_err, + } + df = pd.DataFrame(data) + df.to_csv(out_file) + else: + plt.show() \ No newline at end of file diff --git a/src/ProcessHistograms.py b/src/ProcessHistograms.py index d4ae430..6c15343 100644 --- a/src/ProcessHistograms.py +++ b/src/ProcessHistograms.py @@ -89,7 +89,7 @@ def fit_peaks_multigauss( baseline_width: float, centers: list[float], peak_range: tuple[float,float]=(1,4), - cutoff: tuple[float, float] = (0, np.infty), + cutoff: tuple[float, float] = (0, np.inf), background_linear: bool = True, bins = None, ) -> lm.model.ModelResult: @@ -102,7 +102,7 @@ def fit_peaks_multigauss( baseline_width (float): An estimate of the width in Volts of the noise. centers (List[float]): Initial guesses for the centroid of each Gaussian. 
peak_range (tuple[int, int]): The number of peaks you want to fit. Defaults to 4. - cutoff (Tuple[float, float], optional): Low and high cutoff values. Defaults to (0, np.infty). + cutoff (Tuple[float, float], optional): Low and high cutoff values. Defaults to (0, np.inf). Returns: ModelResult: An lmfit ModelResult object containing all fit information. @@ -176,7 +176,7 @@ def __init__( info: MeasurementInfo, centers: List[float] | np.ndarray = [], #initializes centers as an empty list (so code still works for alpha data) baseline_correct: bool = False, - cutoff: Tuple[float, float] = (0, np.infty), + cutoff: Tuple[float, float] = (0, np.inf), peak_range: Tuple[int,int] = (1,4), subtraction_method: bool = False, background_linear: bool = True, @@ -190,7 +190,7 @@ def __init__( info (MeasurementInfo): Class containing info regarding measurement. centers (List[float]): Initial guesses for centroid of each gaussian. baseline_correct (bool): Boolean value indicating if baseline correction needs to be applied. Defaults to False. - cutoff (Tuple[float, float]): Low and high cutoff values. Defaults to (0,np.infty). + cutoff (Tuple[float, float]): Low and high cutoff values. Defaults to (0,np.inf). peak_range: The number of peaks you want to fit. Defaults to 4. background_linear: If to fit a linear or exponential background to the multi-gaussian peaks: Which peaks to include, either all, LED, or dark. 
@@ -580,11 +580,11 @@ def plot_peak_histogram( # ratio = self.peak_heights textstr += f'--\n' if self.background_linear: - textstr += f'Linear Intercept: {self.peak_fit.best_values['l_intercept']:0.4}\n' - textstr += f'Linear Slope: {self.peak_fit.best_values['l_slope']:0.4}\n' + textstr += f'''Linear Intercept: {self.peak_fit.best_values['l_intercept']:0.4}\n''' + textstr += f'''Linear Slope: {self.peak_fit.best_values['l_slope']:0.4}\n''' else: - textstr += f'Exp Amplitude: {self.peak_fit.best_values['e_amplitude']:0.4}\n' - textstr += f'Exp Decay: {self.peak_fit.best_values['e_decay']:0.4}\n' + textstr += f'''Exp Amplitude: {self.peak_fit.best_values['e_amplitude']:0.4}\n''' + textstr += f'''Exp Decay: {self.peak_fit.best_values['e_decay']:0.4}\n''' textstr += f'--\n' textstr += f'''Reduced $\\chi^2$: {self.peak_fit.redchi:0.2}''' diff --git a/src/ProcessWaveforms.py b/src/ProcessWaveforms.py index c2d9953..e16910e 100644 --- a/src/ProcessWaveforms.py +++ b/src/ProcessWaveforms.py @@ -69,7 +69,7 @@ def get_grp_names(h5path: str) -> list: group_names = list(hdf["RunData"].keys()) return group_names -def get_mode(hist_data: list or np.array) -> tuple[float, float]: +def get_mode(hist_data: list | np.ndarray) -> tuple[float, float]: """Get the mode of histogram data Args: @@ -143,7 +143,7 @@ def __init__( upper_limit: float = 4.4, baseline_correct: bool = False, poly_correct: bool = False, - prominence: float = 0.005, + prominence: float = None, fourier: bool = False, condition: str = "unspecified medium (GN/LXe/Vacuum)", num_waveforms: float = 0 @@ -158,13 +158,13 @@ def __init__( Args: f list: list of h5 file names - acquisition (str, optional): specified file name. Defaults to 'placeholder'. + acquisition (str, optional): specified file name. None will select all acquisitions. Defaults to None. is_pre_bd (bool, optional): specified whether data is solicited, AKA 'empty' baseline data. Defaults to False. 
do_filter (bool, optional): activates butterworth lowpass filter if True. Defaults to False. plot_waveforms (bool, optional): plots waveforms if True. Defaults to False. - upper_limit (float, optional): amplitude threshold for discarding waveforms. Defaults to 4.4. + upper_limit (float, optional): amplitude threshold for discarding waveforms. Set to -1 for automatic setting. Defaults to 4.4. baseline_correct (bool, optional): baseline corrects waveforms if True. Defaults to False. - prominence (float, optional): parameter used for peak finding algo. Defaults to 0.005. + prominence (float, optional): parameter used for scipy.find_peaks. None will run auto_prominence. Defaults to None. fourier (bool, optional): if True performs fourier frequency subtraction. Defaults to False. num_waveforms: (float, optional): number of oscilloscope traces to read in before stopping; default 0 reads everything Raises: @@ -270,12 +270,16 @@ def __init__( if self.upper_limit == -1: self.upper_limit = self.yrange - self.offset - 0.001 + if not self.prominence: + self.prominence = self.auto_prominence() + print(f"PROMINENCE USED FOR BIAS = {self.bias} V: {self.prominence*1000:0.4} mV") + # Searches for peaks using provided parameters self.peak_search_params = { "height": 0.0, # SPE "threshold": None, # SPE "distance": None, # SPE - "prominence": prominence, #specified by user + "prominence": self.prominence, #specified by user "width": None, # SPE "wlen": 100, # SPE "rel_height": None, # SPE @@ -384,6 +388,7 @@ def process_amp(self, amp, fs): use_bins = np.linspace(-self.upper_limit, self.upper_limit, 1000) curr_hist = np.histogram(amp, bins=use_bins) baseline_level, _ = get_mode(curr_hist) + self.baseline_levels.append(baseline_level) self.baseline_mode = baseline_level # print('baseline:', baseline_level) amp -= baseline_level @@ -401,3 +406,40 @@ def get_baseline(self): self.baseline_mean = baseline_fit["fit"].values["center"] self.baseline_err = 
baseline_fit["fit"].params["center"].stderr self.baseline_std = baseline_fit["fit"].values["sigma"] + + def auto_prominence(self) -> float: + proms = [] + for f in self.acquisitions_data: + for a in self.acquisitions_data[f]: + data = self.acquisitions_data[f][a] + tlu = [data[:,i] for i in range(1,len(data[0,:]))] # reconfigure the waveforms so that I can do a mapping below + res = map(lambda x: signal.find_peaks(x, + height=0.0, + threshold=None, + distance=None, + prominence=.001, + width=None, + wlen=50, + rel_height=None, + plateau_size=None), + tlu) + + maxes = list(res) # running the map object, maxes is a tuple of (peaks,props) + props = [i[1] for i in maxes] # grab the properties of the peaks + morps = [i['prominences'] for i in props] # grab the prominences of the peaks + morp = [] # this and the next two lines generate a list of all prominences from the acquisition + for i in morps: + morp += list(i) + proms += morp # add the acquisition's morp list to the overall dataset's prominence list + + fig,ax = plt.subplots(1,1) # plotting + bins = np.linspace(min(proms),max(proms),500) + n,bins,_ = ax.hist(proms,bins=bins) + plt.close(fig) + + ult = np.array([i for i in zip(n,bins) if (i[1]<0.0065 and i[1]>0.0035)]) # grab selection of where the valley should be + ultn = ult[:,0] # bin heights + ultbins = ult[:,1] # bin locations + #print(title) + index = np.argmin(ultn)-1 # where is the minimum of the bin heights + return ultbins[index] # output prominence corresponding to minimum of bin heights \ No newline at end of file