à la carte standard GST reports, and exception handling for computing metrics #615

Merged · 2 commits · Jul 25, 2025

1 change: 0 additions & 1 deletion pygsti/baseobjs/basisconstructors.py
@@ -59,7 +59,6 @@ def mut(i, j, n):


def _check_dim(dim):
global MAX_BASIS_MATRIX_DIM
if not isinstance(dim, _numbers.Integral):
dim = max(dim) # assume dim is a list/tuple of dims & just consider max
if dim > MAX_BASIS_MATRIX_DIM:
2 changes: 1 addition & 1 deletion pygsti/forwardsims/termforwardsim_calc_generic.py
@@ -115,7 +115,7 @@ def prs_as_polynomials(fwdsim, rholabel, elabels, circuit, polynomial_vindices_p
# for term in termlist:
# print("Coeff: ",str(term.coeff))

global DEBUG_FCOUNT # DEBUG!!!
# global DEBUG_FCOUNT # DEBUG!!!
# db_part_cnt = 0
# db_factor_cnt = 0
#print("DB: pr_as_poly for ",str(tuple(map(str,circuit))), " max_order=",fwdsim.max_order)
2 changes: 1 addition & 1 deletion pygsti/optimize/simplerlm.py
@@ -560,7 +560,7 @@ def simplish_leastsq(
Jac = jac_guarded(k, num_fd_iters, obj_fn, jac_fn, f, ari, global_x, fdJac)

if profiler:
jac_gb = Jac.nbytes/(1024.0**3) if hasattr(Jac, 'nbytes') else _np.NaN
jac_gb = Jac.nbytes/(1024.0**3) if hasattr(Jac, 'nbytes') else _np.nan
vals = ((f.size, global_x.size), jac_gb)
profiler.memory_check("simplish_leastsq: after jacobian: shape=%s, GB=%.2f" % vals)

65 changes: 49 additions & 16 deletions pygsti/report/factory.py
@@ -1197,13 +1197,36 @@ def construct_standard_report(results, title="auto",
- idt_idle_oplabel : Label, optional
The label identifying the idle gate (for use with idle tomography).

- skip_sections : tuple[str], optional
Names of the standard report sections to omit from this particular
report. Each string is lowercased, has its whitespace removed, and is
then mapped to the Section class to skip, as follows
{
'summary' : SummarySection,
'goodness' : GoodnessSection,
'colorbox' : GoodnessColorBoxPlotSection,
'invariantgates' : GaugeInvariantsGatesSection,
'invariantgerms' : GaugeInvariantsGermsSection,
'variant' : GaugeVariantSection,
'variantraw' : GaugeVariantsRawSection,
'variantdecomp' : GaugeVariantsDecompSection,
'varianterrorgen' : GaugeVariantsErrorGenSection,
'input' : InputSection,
'meta' : MetaSection,
'help' : HelpSection
}

A KeyError is raised if `skip_sections` contains a string that,
after lowercasing and whitespace removal, is not a key of the
above dict.

verbosity : int, optional
How much detail to send to stdout.

Returns
-------
Workspace
The workspace object used to create the report
Report
"""

printer = _VerbosityPrinter.create_printer(verbosity, comm=comm)
@@ -1265,20 +1288,30 @@ def construct_standard_report(results, title="auto",
flags.add('CombineRobust')

# build section list
sections = [
_section.SummarySection(),
_section.GoodnessSection(),
_section.GoodnessColorBoxPlotSection(),
_section.GaugeInvariantsGatesSection(),
_section.GaugeInvariantsGermsSection(),
_section.GaugeVariantSection(),
_section.GaugeVariantsRawSection(),
_section.GaugeVariantsDecompSection(),
_section.GaugeVariantsErrorGenSection(),
_section.InputSection(),
_section.MetaSection(),
_section.HelpSection()
]
possible_sections = {
'summary' : _section.SummarySection(),
'goodness' : _section.GoodnessSection(),
'colorbox' : _section.GoodnessColorBoxPlotSection(),
'invariantgates' : _section.GaugeInvariantsGatesSection(),
'invariantgerms' : _section.GaugeInvariantsGermsSection(),
'variant' : _section.GaugeVariantSection(),
'variantraw' : _section.GaugeVariantsRawSection(),
'variantdecomp' : _section.GaugeVariantsDecompSection(),
'varianterrorgen' : _section.GaugeVariantsErrorGenSection(),
'input' : _section.InputSection(),
'meta' : _section.MetaSection(),
'help' : _section.HelpSection()
}

skip_sections = advanced_options.get('skip_sections', tuple())
if skip_sections:
if isinstance(skip_sections, str):
skip_sections = [skip_sections]
skip_sections = [s.lower().replace(' ','') for s in skip_sections]
for s in skip_sections:
possible_sections.pop(s)
sections = list(possible_sections.values())
# ^ This whole process won't affect ordering of objects in "sections".

if 'ShowScaling' in flags:
sections.append(_section.GoodnessScalingSection())
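Usage note: the new option rides in through `advanced_options`. A minimal sketch, assuming an existing GST results object (`gst_results` below is a placeholder) and the usual `Report.write_html` entry point, neither of which this PR changes:

    from pygsti.report import construct_standard_report

    # Skip the metadata, help, and per-sequence color-box sections of the standard report.
    report = construct_standard_report(
        gst_results,
        title="A la carte GST report",
        advanced_options={'skip_sections': ('meta', 'help', 'colorbox')},
    )
    report.write_html("example_report_dir")  # hypothetical output path
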
106 changes: 59 additions & 47 deletions pygsti/report/reportables.py
@@ -353,8 +353,7 @@ def rel_circuit_eigenvalues(model_a, model_b, circuit):
"""
A = model_a.sim.product(circuit) # "gate"
B = model_b.sim.product(circuit) # "target gate"
rel_op = _np.dot(_np.linalg.inv(B), A) # "relative gate" == target^{-1} * gate
return _np.linalg.eigvals(rel_op)
return rel_eigenvalues(A, B, None)


Rel_circuit_eigenvalues = _modf.modelfn_factory(rel_circuit_eigenvalues)
@@ -921,13 +920,16 @@ def upper_bound_fidelity(gate, mx_basis):
gate : numpy.ndarray
the transfer-matrix specifying a gate's action.

mx_basis : Basis or {'pp', 'gm', 'std'}
the basis that `gate` is in.
mx_basis : Basis or str
The basis that `gate` is in. Currently restricted to the
Pauli-product basis ('pp').

Returns
-------
float
"""
basis_str = mx_basis if isinstance(mx_basis, str) else mx_basis.name
if basis_str != 'pp':
raise NotImplementedError(f'Basis must be Pauli-Product, got {mx_basis}.')
return _tools.fidelity_upper_bound(gate)[0]
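
A quick illustration of the new fail-fast behavior (a hypothetical call, not one of the PR's tests): any basis other than 'pp' now raises immediately rather than returning a value computed under the wrong assumption.

    import numpy as np
    from pygsti.report import reportables

    try:
        reportables.upper_bound_fidelity(np.eye(4), 'gm')
    except NotImplementedError as e:
        print(e)  # Basis must be Pauli-Product, got gm.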


@@ -1318,8 +1320,12 @@ def std_unitarity(a, b, mx_basis):
-------
float
"""
Lambda = _np.dot(a, _np.linalg.inv(b))
return _tools.unitarity(Lambda, mx_basis)
try:
Lambda = _np.dot(a, _np.linalg.inv(b))
return _tools.unitarity(Lambda, mx_basis)
except _np.linalg.LinAlgError as e:
_warnings.warn(str(e))
return _np.nan


def eigenvalue_unitarity(a, b):
Expand All @@ -1338,10 +1344,14 @@ def eigenvalue_unitarity(a, b):
-------
float
"""
Lambda = _np.dot(a, _np.linalg.inv(b))
d2 = Lambda.shape[0]
lmb = _np.linalg.eigvals(Lambda)
return float(_np.real(_np.linalg.norm(lmb)**2) - 1.0) / (d2 - 1.0)
try:
Lambda = _np.dot(a, _np.linalg.inv(b))
d2 = Lambda.shape[0]
lmb = _np.linalg.eigvals(Lambda)
return float(_np.real(_np.linalg.norm(lmb)**2) - 1.0) / (d2 - 1.0)
except _np.linalg.LinAlgError as e:
_warnings.warn(str(e))
return _np.nan


def nonunitary_entanglement_infidelity(a, b, mx_basis):
@@ -1700,9 +1710,13 @@ def rel_eigenvalues(a, b, mx_basis):
-------
numpy.ndarray
"""
target_op_inv = _np.linalg.inv(b)
rel_op = _np.dot(target_op_inv, a)
return _np.linalg.eigvals(rel_op).astype("complex") # since they generally *can* be complex
try:
target_op_inv = _np.linalg.inv(b)
rel_op = _np.dot(target_op_inv, a)
return _np.linalg.eigvals(rel_op).astype("complex") # since they generally *can* be complex
except _np.linalg.LinAlgError as e:
_warnings.warn(str(e))
return _np.nan * _np.ones(a.shape[0])


Rel_eigvals = _modf.opsfn_factory(rel_eigenvalues)
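
Review note: the guards added to `std_unitarity`, `eigenvalue_unitarity`, and `rel_eigenvalues` all follow the same pattern — attempt the target-matrix inversion, and on a `LinAlgError` emit a warning and return NaNs instead of aborting the whole report. A self-contained sketch of that behavior (plain NumPy, not pyGSTi code):

    import warnings
    import numpy as np

    def guarded_rel_eigenvalues(a, b):
        # Eigenvalues of inv(b) @ a; a singular target b yields a NaN vector plus a warning.
        try:
            return np.linalg.eigvals(np.linalg.inv(b) @ a).astype(complex)
        except np.linalg.LinAlgError as e:
            warnings.warn(str(e))
            return np.full(a.shape[0], np.nan, dtype=complex)

    print(guarded_rel_eigenvalues(np.eye(4), np.diag([1., 1., 1., 0.])))  # singular target -> NaNs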
@@ -1790,28 +1804,8 @@ def rel_log_diff_eigenvalues(a, b, mx_basis):
# init args == (model1, model2, op_label)


def rel_gate_eigenvalues(a, b, mx_basis): # DUPLICATE of rel_eigenvalues TODO
"""
Eigenvalues of b^{-1} * a

Parameters
----------
a : numpy.ndarray
The first process (transfer) matrix.

b : numpy.ndarray
The second process (transfer) matrix.

mx_basis : Basis or {'pp', 'gm', 'std'}
the basis that `a` and `b` are in.

Returns
-------
numpy.ndarray
"""
rel_op = _np.dot(_np.linalg.inv(b), a) # "relative gate" == target^{-1} * gate
return _np.linalg.eigvals(rel_op).astype("complex") # since they generally *can* be complex

rel_gate_eigenvalues = rel_eigenvalues
# ^ An alias.

Rel_gate_eigenvalues = _modf.opsfn_factory(rel_gate_eigenvalues)
# init args == (model1, model2, op_label)
@@ -2157,21 +2151,37 @@ def general_decomposition(model_a, model_b):
gl = str(gl) # Label -> str for decomp-dict keys

target_evals = _np.linalg.eigvals(targetOp)
if _np.any(_np.isclose(target_evals, -1.0)):
target_logG = _tools.unitary_superoperator_matrix_log(targetOp, mxBasis)
logG = _tools.approximate_matrix_log(gate, target_logG)
else:
logG = _tools.real_matrix_log(gate, "warn")
if _np.linalg.norm(logG.imag) > 1e-6:
_warnings.warn("Truncating imaginary logarithm!")
logG = _np.real(logG)
failed = False
try:
if _np.any(_np.isclose(target_evals, -1.0)):
target_logG = _tools.unitary_superoperator_matrix_log(targetOp, mxBasis)
logG = _tools.approximate_matrix_log(gate, target_logG)
else:
logG = _tools.real_matrix_log(gate, "warn")
if _np.linalg.norm(logG.imag) > 1e-6:
_warnings.warn("Truncating imaginary logarithm!")
logG = _np.real(logG)
except (_np.linalg.LinAlgError, AssertionError) as e:
_warnings.warn(str(e))
logG = _np.nan * _np.ones(gate.shape)
failed = True

proj_basis = _Basis.cast('PP', model_a.dim) if model_a.state_space.is_entirely_qubits else mxBasis
basis_mxs = proj_basis.elements
blk = _LindbladCoefficientBlock('ham', proj_basis)
num_elem_errgens = len(blk.elementary_errorgens)

if failed:
decomp[gl + ' log inexactness'] = _np.nan
decomp[gl + ' axis' ] = _np.nan * _np.ones(num_elem_errgens)
decomp[gl + ' angle'] = _np.nan
decomp[gl + ' hamiltonian eigenvalues'] = _np.nan * _np.ones(basis_mxs[0].shape[0])
continue

decomp[gl + ' log inexactness'] = _np.linalg.norm(_spl.expm(logG) - gate)

#hamProjs, hamGens = _tools.std_errorgen_projections(
# logG, "hamiltonian", mxBasis, mxBasis, return_generators=True)
proj_basis = _Basis.cast('PP', model_a.dim) if model_a.state_space.is_entirely_qubits else mxBasis
blk = _LindbladCoefficientBlock('ham', proj_basis)
blk.set_from_errorgen_projections(logG, mxBasis)
hamProjs = blk.block_data
#hamGens = blk.create_lindblad_term_superoperators(mxBasis)
@@ -2185,7 +2195,6 @@
# to *twice* this coefficient (e.g. a X(pi/2) rotn is exp( i pi/4 X ) ),
# thus the factor of 2.0 above.

basis_mxs = proj_basis.elements
# REMOVE scalings = [(_np.linalg.norm(hamGens[i]) / _np.linalg.norm(_tools.hamiltonian_to_lindbladian(mx))
# REMOVE if _np.linalg.norm(hamGens[i]) > 1e-10 else 0.0)
# REMOVE for i, mx in enumerate(basis_mxs)]
@@ -2198,6 +2207,9 @@
for gl_other in opLabels:
rotnAngle = decomp[str(gl) + ' angle']
rotnAngle_other = decomp[str(gl_other) + ' angle']
if _np.isnan(rotnAngle) or _np.isnan(rotnAngle_other):
decomp[str(gl) + "," + str(gl_other) + " axis angle"] = _np.nan
continue

if gl == gl_other or abs(rotnAngle) < 1e-4 or abs(rotnAngle_other) < 1e-4:
decomp[str(gl) + "," + str(gl_other) + " axis angle"] = 10000.0 # sentinel for irrelevant angle
@@ -2439,7 +2451,7 @@ def info_of_opfn_by_name(name):
'respectively'),
"trace": ("1/2 Trace|Distance",
"0.5 | Chi(A) - Chi(B) |_tr"),
"diamond": ("1/2 Diamond-Dist",
"diamond": ("1/2 Diamond|Distance",
"0.5 sup | (1 x (A-B))(rho) |_tr"),
"nuinf": ("Non-unitary|Ent. Infidelity",
"(d^2-1)/d^2 [1 - sqrt( unitarity(A B^-1) )]"),
36 changes: 24 additions & 12 deletions pygsti/tools/optools.py
@@ -498,8 +498,8 @@ def entanglement_fidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):

#if the tp flag isn't set we'll calculate whether it is true here
if is_tp is None:
def is_tp_fn(x): return _np.isclose(x[0, 0], 1.0) and all(
[_np.isclose(x[0, i], 0) for i in range(1,d2)])
def is_tp_fn(x):
return _np.isclose(x[0, 0], 1.0) and _np.allclose(x[0, 1:d2], 0)

is_tp= (is_tp_fn(a) and is_tp_fn(b))

@@ -973,9 +973,14 @@ def povm_fidelity(model, target_model, povmlbl):
-------
float
"""
povm_mx = compute_povm_map(model, povmlbl)
target_povm_mx = compute_povm_map(target_model, povmlbl)
return entanglement_fidelity(povm_mx, target_povm_mx, target_model.basis)
try:
povm_mx = compute_povm_map(model, povmlbl)
target_povm_mx = compute_povm_map(target_model, povmlbl)
return entanglement_fidelity(povm_mx, target_povm_mx, target_model.basis)
except AssertionError as e:
se = str(e)
assert '`dim` must be a perfect square' in se, se
return _np.nan


def povm_jtracedist(model, target_model, povmlbl):
@@ -997,9 +1002,14 @@ def povm_jtracedist(model, target_model, povmlbl):
-------
float
"""
povm_mx = compute_povm_map(model, povmlbl)
target_povm_mx = compute_povm_map(target_model, povmlbl)
return jtracedist(povm_mx, target_povm_mx, target_model.basis)
try:
povm_mx = compute_povm_map(model, povmlbl)
target_povm_mx = compute_povm_map(target_model, povmlbl)
return jtracedist(povm_mx, target_povm_mx, target_model.basis)
except AssertionError as e:
se = str(e)
assert '`dim` must be a perfect square' in se, se
return _np.nan


def povm_diamonddist(model, target_model, povmlbl):
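
The POVM metrics take a narrower route than the `LinAlgError` guards elsewhere in this PR: only the specific "`dim` must be a perfect square" assertion (presumably raised while building the POVM map) is converted to NaN, and any other `AssertionError` is re-raised by the inner assert. A standalone sketch of that pattern, with hypothetical helper names:

    import numpy as np

    def guarded_povm_metric(compute):
        # Convert only the expected failure to NaN; let anything unexpected surface.
        try:
            return compute()
        except AssertionError as e:
            se = str(e)
            assert '`dim` must be a perfect square' in se, se
            return np.nan

    def failing_compute():
        raise AssertionError('`dim` must be a perfect square. Got dim=5.')

    print(guarded_povm_metric(failing_compute))  # nan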
@@ -1785,13 +1795,15 @@ def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_label
"""
# the same as decompose_errorgen but given a dict/list of elementary errorgens directly instead of a basis and type
if isinstance(errorgen_basis, _Basis):
errorgen_std = _bt.change_basis(errorgen, errorgen_basis, errorgen_basis.create_equivalent('std'))
std_basis = errorgen_basis.create_equivalent('std')
errorgen_std = _bt.change_basis(errorgen, errorgen_basis, std_basis)

#expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
errorgen_std = _bt.resize_std_mx(errorgen_std, 'expand', errorgen_basis.create_equivalent('std'),
errorgen_basis.create_simple_equivalent('std'))
# Expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
expanded_std_basis = errorgen_basis.create_simple_equivalent('std')
errorgen_std = _bt.resize_std_mx(errorgen_std, 'expand', std_basis, expanded_std_basis)
else:
errorgen_std = _bt.change_basis(errorgen, errorgen_basis, "std")

flat_errorgen_std = errorgen_std.toarray().ravel() if _sps.issparse(errorgen_std) else errorgen_std.ravel()

d2 = errorgen_std.shape[0]
23 changes: 13 additions & 10 deletions pygsti/tools/rbtheory.py
@@ -90,7 +90,7 @@ def predicted_rb_number(model, target_model, weights=None, d=None, rtype='EI'):
"""
if d is None: d = int(round(_np.sqrt(model.dim)))
p = predicted_rb_decay_parameter(model, target_model, weights=weights)
r = _rbtls.p_to_r(p, d=d, rtype=rtype)
r = _rbtls.p_to_r(p, d=d, rtype=rtype) if not _np.isnan(p) else _np.nan
return r
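
For context, `_rbtls.p_to_r` converts the decay parameter p into an RB number r. A small sketch of how the NaN guard above is meant to behave; the entanglement-infidelity formula below is the standard conversion and is stated here as an assumption, not read off this diff:

    import numpy as np

    def p_to_r_ei(p, d):
        # Assumed standard 'EI' conversion: r = (d^2 - 1)(1 - p) / d^2
        return (d**2 - 1) * (1 - p) / d**2

    def predicted_rb_number_sketch(p, d=2):
        # Mirrors the guarded line above: only convert when p is a number.
        return p_to_r_ei(p, d) if not np.isnan(p) else np.nan

    print(predicted_rb_number_sketch(0.99))    # ~0.0075 for a single qubit
    print(predicted_rb_number_sketch(np.nan))  # NaN propagates untouched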


@@ -132,15 +132,18 @@ def predicted_rb_decay_parameter(model, target_model, weights=None):
The second largest eigenvalue of L. This is the RB decay parameter
for various types of RB.
"""
L = L_matrix(model, target_model, weights=weights)
E = _np.absolute(_np.linalg.eigvals(L))
E = _np.flipud(_np.sort(E))
if abs(E[0] - 1) > 10**(-12):
_warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.")

if E[1].imag > 10**(-10):
_warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.")
p = abs(E[1])
try:
L = L_matrix(model, target_model, weights=weights)
E = _np.absolute(_np.linalg.eigvals(L))
E = _np.flipud(_np.sort(E))
if abs(E[0] - 1) > 10**(-12):
_warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.")

if E[1].imag > 10**(-10):
_warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.")
p = abs(E[1])
except _np.linalg.LinAlgError:
p = _np.nan
return p

