diff --git a/pygsti/baseobjs/basisconstructors.py b/pygsti/baseobjs/basisconstructors.py index 71e3ac9b0..7edff58c4 100644 --- a/pygsti/baseobjs/basisconstructors.py +++ b/pygsti/baseobjs/basisconstructors.py @@ -59,7 +59,6 @@ def mut(i, j, n): def _check_dim(dim): - global MAX_BASIS_MATRIX_DIM if not isinstance(dim, _numbers.Integral): dim = max(dim) # assume dim is a list/tuple of dims & just consider max if dim > MAX_BASIS_MATRIX_DIM: diff --git a/pygsti/forwardsims/termforwardsim_calc_generic.py b/pygsti/forwardsims/termforwardsim_calc_generic.py index a477248c3..efdf83857 100644 --- a/pygsti/forwardsims/termforwardsim_calc_generic.py +++ b/pygsti/forwardsims/termforwardsim_calc_generic.py @@ -115,7 +115,7 @@ def prs_as_polynomials(fwdsim, rholabel, elabels, circuit, polynomial_vindices_p # for term in termlist: # print("Coeff: ",str(term.coeff)) - global DEBUG_FCOUNT # DEBUG!!! + # global DEBUG_FCOUNT # DEBUG!!! # db_part_cnt = 0 # db_factor_cnt = 0 #print("DB: pr_as_poly for ",str(tuple(map(str,circuit))), " max_order=",fwdsim.max_order) diff --git a/pygsti/optimize/simplerlm.py b/pygsti/optimize/simplerlm.py index 24432d8f2..a027bfd53 100644 --- a/pygsti/optimize/simplerlm.py +++ b/pygsti/optimize/simplerlm.py @@ -560,7 +560,7 @@ def simplish_leastsq( Jac = jac_guarded(k, num_fd_iters, obj_fn, jac_fn, f, ari, global_x, fdJac) if profiler: - jac_gb = Jac.nbytes/(1024.0**3) if hasattr(Jac, 'nbytes') else _np.NaN + jac_gb = Jac.nbytes/(1024.0**3) if hasattr(Jac, 'nbytes') else _np.nan vals = ((f.size, global_x.size), jac_gb) profiler.memory_check("simplish_leastsq: after jacobian: shape=%s, GB=%.2f" % vals) diff --git a/pygsti/report/factory.py b/pygsti/report/factory.py index d08ef6008..af52d5e60 100644 --- a/pygsti/report/factory.py +++ b/pygsti/report/factory.py @@ -1197,13 +1197,36 @@ def construct_standard_report(results, title="auto", - idt_idle_oplabel : Label, optional The label identifying the idle gate (for use with idle tomography). + - skip_sections : tuple[str], optional + Contains names of standard report sections that should be skipped + in this particular report. Strings will be cast to lowercase, + stripped of white space, and then mapped to omitted Section classes + as follows + { + 'summary' : SummarySection, + 'goodness' : GoodnessSection, + 'colorbox' : GoodnessColorBoxPlotSection, + 'invariantgates' : GaugeInvariantsGatesSection, + 'invariantgerms' : GaugeInvariantsGermsSection, + 'variant' : GaugeVariantSection, + 'variantraw' : GaugeVariantsRawSection, + 'variantdecomp' : GaugeVariantsDecompSection, + 'varianterrorgen' : GaugeVariantsErrorGenSection, + 'input' : InputSection, + 'meta' : MetaSection, + 'help' : HelpSection + } + + A KeyError will be raised if skip_sections contains a string + that is not in the keys of the above dict (after casting to + lower case and stripping white space). + verbosity : int, optional How much detail to send to stdout. 
Returns ------- - Workspace - The workspace object used to create the report + Report """ printer = _VerbosityPrinter.create_printer(verbosity, comm=comm) @@ -1265,20 +1288,30 @@ def construct_standard_report(results, title="auto", flags.add('CombineRobust') # build section list - sections = [ - _section.SummarySection(), - _section.GoodnessSection(), - _section.GoodnessColorBoxPlotSection(), - _section.GaugeInvariantsGatesSection(), - _section.GaugeInvariantsGermsSection(), - _section.GaugeVariantSection(), - _section.GaugeVariantsRawSection(), - _section.GaugeVariantsDecompSection(), - _section.GaugeVariantsErrorGenSection(), - _section.InputSection(), - _section.MetaSection(), - _section.HelpSection() - ] + possible_sections = { + 'summary' : _section.SummarySection(), + 'goodness' : _section.GoodnessSection(), + 'colorbox' : _section.GoodnessColorBoxPlotSection(), + 'invariantgates' : _section.GaugeInvariantsGatesSection(), + 'invariantgerms' : _section.GaugeInvariantsGermsSection(), + 'variant' : _section.GaugeVariantSection(), + 'variantraw' : _section.GaugeVariantsRawSection(), + 'variantdecomp' : _section.GaugeVariantsDecompSection(), + 'varianterrorgen' : _section.GaugeVariantsErrorGenSection(), + 'input' : _section.InputSection(), + 'meta' : _section.MetaSection(), + 'help' : _section.HelpSection() + } + + skip_sections = advanced_options.get('skip_sections', tuple()) + if skip_sections: + if isinstance(skip_sections, str): + skip_sections = [skip_sections] + skip_sections = [s.lower().replace(' ','') for s in skip_sections] + for s in skip_sections: + possible_sections.pop(s) + sections = list(possible_sections.values()) + # ^ This whole process won't affect ordering of objects in "sections". if 'ShowScaling' in flags: sections.append(_section.GoodnessScalingSection()) diff --git a/pygsti/report/reportables.py b/pygsti/report/reportables.py index 39bd7c533..dfa46a181 100644 --- a/pygsti/report/reportables.py +++ b/pygsti/report/reportables.py @@ -353,8 +353,7 @@ def rel_circuit_eigenvalues(model_a, model_b, circuit): """ A = model_a.sim.product(circuit) # "gate" B = model_b.sim.product(circuit) # "target gate" - rel_op = _np.dot(_np.linalg.inv(B), A) # "relative gate" == target^{-1} * gate - return _np.linalg.eigvals(rel_op) + return rel_eigenvalues(A, B, None) Rel_circuit_eigenvalues = _modf.modelfn_factory(rel_circuit_eigenvalues) @@ -921,13 +920,16 @@ def upper_bound_fidelity(gate, mx_basis): gate : numpy.ndarray the transfer-matrix specifying a gate's action. - mx_basis : Basis or {'pp', 'gm', 'std'} - the basis that `gate` is in. 
+ mx_basis : Basis or string + Currently restricted to Pauli-product Returns ------- float """ + basis_str = mx_basis if isinstance(mx_basis, str) else mx_basis.name + if basis_str != 'pp': + raise NotImplementedError(f'Basis must be Pauli-Product, got {mx_basis}.') return _tools.fidelity_upper_bound(gate)[0] @@ -1318,8 +1320,12 @@ def std_unitarity(a, b, mx_basis): ------- float """ - Lambda = _np.dot(a, _np.linalg.inv(b)) - return _tools.unitarity(Lambda, mx_basis) + try: + Lambda = _np.dot(a, _np.linalg.inv(b)) + return _tools.unitarity(Lambda, mx_basis) + except _np.linalg.LinAlgError as e: + _warnings.warn(str(e)) + return _np.nan def eigenvalue_unitarity(a, b): @@ -1338,10 +1344,14 @@ def eigenvalue_unitarity(a, b): ------- float """ - Lambda = _np.dot(a, _np.linalg.inv(b)) - d2 = Lambda.shape[0] - lmb = _np.linalg.eigvals(Lambda) - return float(_np.real(_np.linalg.norm(lmb)**2) - 1.0) / (d2 - 1.0) + try: + Lambda = _np.dot(a, _np.linalg.inv(b)) + d2 = Lambda.shape[0] + lmb = _np.linalg.eigvals(Lambda) + return float(_np.real(_np.linalg.norm(lmb)**2) - 1.0) / (d2 - 1.0) + except _np.linalg.LinAlgError as e: + _warnings.warn(str(e)) + return _np.nan def nonunitary_entanglement_infidelity(a, b, mx_basis): @@ -1700,9 +1710,13 @@ def rel_eigenvalues(a, b, mx_basis): ------- numpy.ndarray """ - target_op_inv = _np.linalg.inv(b) - rel_op = _np.dot(target_op_inv, a) - return _np.linalg.eigvals(rel_op).astype("complex") # since they generally *can* be complex + try: + target_op_inv = _np.linalg.inv(b) + rel_op = _np.dot(target_op_inv, a) + return _np.linalg.eigvals(rel_op).astype("complex") # since they generally *can* be complex + except _np.linalg.LinAlgError as e: + _warnings.warn(str(e)) + return _np.nan * _np.ones(a.shape) Rel_eigvals = _modf.opsfn_factory(rel_eigenvalues) @@ -1790,28 +1804,8 @@ def rel_log_diff_eigenvalues(a, b, mx_basis): # init args == (model1, model2, op_label) -def rel_gate_eigenvalues(a, b, mx_basis): # DUPLICATE of rel_eigenvalues TODO - """ - Eigenvalues of b^{-1} * a - - Parameters - ---------- - a : numpy.ndarray - The first process (transfer) matrix. - - b : numpy.ndarray - The second process (transfer) matrix. - - mx_basis : Basis or {'pp', 'gm', 'std'} - the basis that `a` and `b` are in. - - Returns - ------- - numpy.ndarray - """ - rel_op = _np.dot(_np.linalg.inv(b), a) # "relative gate" == target^{-1} * gate - return _np.linalg.eigvals(rel_op).astype("complex") # since they generally *can* be complex - +rel_gate_eigenvalues = rel_eigenvalues +# ^ An alias. 
Rel_gate_eigenvalues = _modf.opsfn_factory(rel_gate_eigenvalues) # init args == (model1, model2, op_label) @@ -2157,21 +2151,37 @@ def general_decomposition(model_a, model_b): gl = str(gl) # Label -> str for decomp-dict keys target_evals = _np.linalg.eigvals(targetOp) - if _np.any(_np.isclose(target_evals, -1.0)): - target_logG = _tools.unitary_superoperator_matrix_log(targetOp, mxBasis) - logG = _tools.approximate_matrix_log(gate, target_logG) - else: - logG = _tools.real_matrix_log(gate, "warn") - if _np.linalg.norm(logG.imag) > 1e-6: - _warnings.warn("Truncating imaginary logarithm!") - logG = _np.real(logG) + failed = False + try: + if _np.any(_np.isclose(target_evals, -1.0)): + target_logG = _tools.unitary_superoperator_matrix_log(targetOp, mxBasis) + logG = _tools.approximate_matrix_log(gate, target_logG) + else: + logG = _tools.real_matrix_log(gate, "warn") + if _np.linalg.norm(logG.imag) > 1e-6: + _warnings.warn("Truncating imaginary logarithm!") + logG = _np.real(logG) + except (_np.linalg.LinAlgError, AssertionError) as e: + _warnings.warn(str(e)) + logG = _np.nan * _np.ones(gate.shape) + failed = True + + proj_basis = _Basis.cast('PP', model_a.dim) if model_a.state_space.is_entirely_qubits else mxBasis + basis_mxs = proj_basis.elements + blk = _LindbladCoefficientBlock('ham', proj_basis) + num_elem_errgens = len(blk.elementary_errorgens) + + if failed: + decomp[gl + ' log inexactness'] = _np.nan + decomp[gl + ' axis' ] = _np.nan * _np.ones(num_elem_errgens) + decomp[gl + ' angle'] = _np.nan + decomp[gl + ' hamiltonian eigenvalues'] = _np.nan * _np.ones(basis_mxs[0].shape[0]) + continue decomp[gl + ' log inexactness'] = _np.linalg.norm(_spl.expm(logG) - gate) #hamProjs, hamGens = _tools.std_errorgen_projections( # logG, "hamiltonian", mxBasis, mxBasis, return_generators=True) - proj_basis = _Basis.cast('PP', model_a.dim) if model_a.state_space.is_entirely_qubits else mxBasis - blk = _LindbladCoefficientBlock('ham', proj_basis) blk.set_from_errorgen_projections(logG, mxBasis) hamProjs = blk.block_data #hamGens = blk.create_lindblad_term_superoperators(mxBasis) @@ -2185,7 +2195,6 @@ def general_decomposition(model_a, model_b): # to *twice* this coefficient (e.g. a X(pi/2) rotn is exp( i pi/4 X ) ), # thus the factor of 2.0 above. - basis_mxs = proj_basis.elements # REMOVE scalings = [(_np.linalg.norm(hamGens[i]) / _np.linalg.norm(_tools.hamiltonian_to_lindbladian(mx)) # REMOVE if _np.linalg.norm(hamGens[i]) > 1e-10 else 0.0) # REMOVE for i, mx in enumerate(basis_mxs)] @@ -2198,6 +2207,9 @@ def general_decomposition(model_a, model_b): for gl_other in opLabels: rotnAngle = decomp[str(gl) + ' angle'] rotnAngle_other = decomp[str(gl_other) + ' angle'] + if _np.isnan(rotnAngle) or _np.isnan(rotnAngle_other): + decomp[str(gl) + "," + str(gl_other) + " axis angle"] = _np.nan + continue if gl == gl_other or abs(rotnAngle) < 1e-4 or abs(rotnAngle_other) < 1e-4: decomp[str(gl) + "," + str(gl_other) + " axis angle"] = 10000.0 # sentinel for irrelevant angle @@ -2439,7 +2451,7 @@ def info_of_opfn_by_name(name): 'respectively'), "trace": ("1/2 Trace|Distance", "0.5 | Chi(A) - Chi(B) |_tr"), - "diamond": ("1/2 Diamond-Dist", + "diamond": ("1/2 Diamond|Distance", "0.5 sup | (1 x (A-B))(rho) |_tr"), "nuinf": ("Non-unitary|Ent. 
Infidelity",
                  "(d^2-1)/d^2 [1 - sqrt( unitarity(A B^-1) )]"),
diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py
index 5d85ba879..309c5b931 100644
--- a/pygsti/tools/optools.py
+++ b/pygsti/tools/optools.py
@@ -498,8 +498,8 @@ def entanglement_fidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):
 
     #if the tp flag isn't set we'll calculate whether it is true here
     if is_tp is None:
-        def is_tp_fn(x): return _np.isclose(x[0, 0], 1.0) and all(
-            [_np.isclose(x[0, i], 0) for i in range(1,d2)])
+        def is_tp_fn(x):
+            return _np.isclose(x[0, 0], 1.0) and _np.allclose(x[0, 1:d2], 0)
 
         is_tp= (is_tp_fn(a) and is_tp_fn(b))
 
@@ -973,9 +973,14 @@ def povm_fidelity(model, target_model, povmlbl):
     -------
     float
     """
-    povm_mx = compute_povm_map(model, povmlbl)
-    target_povm_mx = compute_povm_map(target_model, povmlbl)
-    return entanglement_fidelity(povm_mx, target_povm_mx, target_model.basis)
+    try:
+        povm_mx = compute_povm_map(model, povmlbl)
+        target_povm_mx = compute_povm_map(target_model, povmlbl)
+        return entanglement_fidelity(povm_mx, target_povm_mx, target_model.basis)
+    except AssertionError as e:
+        se = str(e)
+        assert '`dim` must be a perfect square' in se, se
+        return _np.nan
 
 
 def povm_jtracedist(model, target_model, povmlbl):
@@ -997,9 +1002,14 @@
     -------
     float
     """
-    povm_mx = compute_povm_map(model, povmlbl)
-    target_povm_mx = compute_povm_map(target_model, povmlbl)
-    return jtracedist(povm_mx, target_povm_mx, target_model.basis)
+    try:
+        povm_mx = compute_povm_map(model, povmlbl)
+        target_povm_mx = compute_povm_map(target_model, povmlbl)
+        return jtracedist(povm_mx, target_povm_mx, target_model.basis)
+    except AssertionError as e:
+        se = str(e)
+        assert '`dim` must be a perfect square' in se, se
+        return _np.nan
 
 
 def povm_diamonddist(model, target_model, povmlbl):
@@ -1785,13 +1795,15 @@ def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_label
     """
     # the same as decompose_errorgen but given a dict/list of elementary errorgens directly instead of a basis and type
     if isinstance(errorgen_basis, _Basis):
-        errorgen_std = _bt.change_basis(errorgen, errorgen_basis, errorgen_basis.create_equivalent('std'))
+        std_basis = errorgen_basis.create_equivalent('std')
+        errorgen_std = _bt.change_basis(errorgen, errorgen_basis, std_basis)
 
-        #expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
-        errorgen_std = _bt.resize_std_mx(errorgen_std, 'expand', errorgen_basis.create_equivalent('std'),
-                                         errorgen_basis.create_simple_equivalent('std'))
+        # Expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
+        expanded_std_basis = errorgen_basis.create_simple_equivalent('std')
+        errorgen_std = _bt.resize_std_mx(errorgen_std, 'expand', std_basis, expanded_std_basis)
     else:
         errorgen_std = _bt.change_basis(errorgen, errorgen_basis, "std")
+    flat_errorgen_std = errorgen_std.toarray().ravel() if _sps.issparse(errorgen_std) else errorgen_std.ravel()
 
     d2 = errorgen_std.shape[0]
 
diff --git a/pygsti/tools/rbtheory.py b/pygsti/tools/rbtheory.py
index dd6c21ffe..9649fc6c4 100644
--- a/pygsti/tools/rbtheory.py
+++ b/pygsti/tools/rbtheory.py
@@ -90,7 +90,7 @@ def predicted_rb_number(model, target_model, weights=None, d=None, rtype='EI'):
     """
     if d is None: d = int(round(_np.sqrt(model.dim)))
     p = predicted_rb_decay_parameter(model, target_model, weights=weights)
-    r = _rbtls.p_to_r(p, d=d, rtype=rtype)
+    r = _rbtls.p_to_r(p, d=d, rtype=rtype) if not _np.isnan(p) else _np.nan
     return r
 
 
@@ -132,15 
+132,18 @@ def predicted_rb_decay_parameter(model, target_model, weights=None): The second largest eigenvalue of L. This is the RB decay parameter for various types of RB. """ - L = L_matrix(model, target_model, weights=weights) - E = _np.absolute(_np.linalg.eigvals(L)) - E = _np.flipud(_np.sort(E)) - if abs(E[0] - 1) > 10**(-12): - _warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.") - - if E[1].imag > 10**(-10): - _warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.") - p = abs(E[1]) + try: + L = L_matrix(model, target_model, weights=weights) + E = _np.absolute(_np.linalg.eigvals(L)) + E = _np.flipud(_np.sort(E)) + if abs(E[0] - 1) > 10**(-12): + _warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.") + + if E[1].imag > 10**(-10): + _warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.") + p = abs(E[1]) + except _np.linalg.LinAlgError: + p = _np.nan return p
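
Reviewer note: the snippet below is a usage sketch, not part of this diff, showing how the new `skip_sections` entry in `advanced_options` would be used. `gst_results` and the output directory are placeholder names; section keys follow the lowercased, space-stripped mapping documented in the `construct_standard_report` docstring above.

    # Hypothetical usage sketch (assumes `gst_results` is an existing GST
    # results object, e.g. the output of a StandardGST protocol run).
    from pygsti.report import construct_standard_report

    report = construct_standard_report(
        gst_results, title="Example GST Report",
        advanced_options={'skip_sections': ('meta', 'help')})  # omit the Meta and Help sections
    report.write_html("example_report_dir")  # placeholder output path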
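Reviewer note: a second sketch, also not part of this diff, illustrating the softened failure mode added to pygsti/tools/rbtheory.py: predicted_rb_decay_parameter now returns NaN when the eigendecomposition of the L matrix raises a LinAlgError, and predicted_rb_number propagates that NaN instead of raising. The model construction below is illustrative only.

    # Hypothetical sketch: RB predictions now degrade to NaN rather than raising.
    import numpy as np
    from pygsti.modelpacks import smq1Q_XYI
    from pygsti.tools import rbtheory as rbt

    target_model = smq1Q_XYI.target_model()
    noisy_model = target_model.depolarize(op_noise=0.01)  # illustrative noise level

    p = rbt.predicted_rb_decay_parameter(noisy_model, target_model)
    r = rbt.predicted_rb_number(noisy_model, target_model)
    if np.isnan(r):  # NaN only if the underlying eigen-decomposition failed
        print("RB prediction unavailable for this model pair")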