This tutorial demonstrates how to perform GST on a "leaky-qubit" described by a 3-level (instead of the desired 2-level) system.
import pygsti
import pygsti.construction as pc
import pygsti.construction.std1Q_XYI as std1Q
import numpy as np
import scipy.linalg as sla
#import pickle
def to_3level_unitary(U_2level):
    """Embed a 2x2 unitary into a 3x3 unitary acting trivially on the leakage level.

    The 2-level unitary occupies the upper-left block; the third (leakage)
    level is left untouched (diagonal entry 1, no coupling terms).
    """
    embedded = np.eye(3, dtype=complex)   # [2,2] entry starts (and stays) at 1
    embedded[0:2, 0:2] = U_2level         # qubit block overwrites upper-left 2x2
    return embedded
def unitary_to_gmgate(U):
    """Convert a unitary matrix into a superoperator in the Gell-Mann basis.

    First forms the process (superoperator) matrix of U in the standard basis,
    then changes basis 'std' -> 'gm'.
    """
    process_mx = pygsti.tools.unitary_to_process_mx(U)
    return pygsti.tools.change_basis(process_mx, 'std', 'gm')
def state_to_gmvec(state):
    """Convert a density matrix (std basis) to a vector in the Gell-Mann basis.

    BUGFIX: the original body referenced ``pygsti.tools.stdmx_to_gmvec``
    without calling it, so the function did nothing and returned None.
    """
    return pygsti.tools.stdmx_to_gmvec(state)
# Build the ideal models: the usual 2-level (qubit) target model and a
# 3-level model whose third level represents leakage.
Us = pygsti.tools.internalgates.get_standard_gatename_unitaries()  # standard gate-name -> unitary map
mdl_2level_ideal = std1Q.target_model()

# State prep: all population in level 0 (the qubit's |0>).
rho0 = np.array( [[1,0,0],
                  [0,0,0],
                  [0,0,0]], complex)
# Outcome '0' effect = projector onto level 0 (same matrix as rho0).
E0 = rho0
# Outcome '1' effect lumps level 1 together with the leakage level, so
# leaked population is counted as a '1' measurement result.
E1 = np.array( [[0,0,0],
                [0,1,0],
                [0,0,1]], complex)

# A single 3-dimensional state-space factor labeled 'Qubit+Leakage'.
sslbls = pygsti.obj.StateSpaceLabels(['Qubit+Leakage'],[3])
mdl_3level_ideal = pygsti.obj.ExplicitOpModel(sslbls, 'gm')  # Gell-Mann basis
mdl_3level_ideal['rho0'] = pygsti.tools.stdmx_to_gmvec(rho0)
mdl_3level_ideal['Mdefault'] = pygsti.obj.TPPOVM([('0',pygsti.tools.stdmx_to_gmvec(E0)),
                                                  ('1',pygsti.tools.stdmx_to_gmvec(E1))])
# Ideal gates: embed the standard 2-level unitaries so they act trivially
# on the leakage level, then convert to Gell-Mann superoperators.
mdl_3level_ideal['Gi'] = unitary_to_gmgate( to_3level_unitary(Us['Gi']))
mdl_3level_ideal['Gx'] = unitary_to_gmgate( to_3level_unitary(Us['Gxpi2']))
mdl_3level_ideal['Gy'] = unitary_to_gmgate( to_3level_unitary(Us['Gypi2']))
# Construct a unitary leakage error: a small X-type rotation acting on the
# {level 1, level 2} subspace, coherently moving population between the
# qubit's |1> state and the leakage level.
sigmaX = np.array([[0,1],[1,0]],complex)
rot = sla.expm(1j * 0.1 * sigmaX)  # exp(i*0.1*X): small coherent rotation
Uleakage = np.identity(3,complex)
Uleakage[1:3,1:3] = rot            # only levels 1 and 2 are mixed
leakageOp = unitary_to_gmgate(Uleakage)
#print(Uleakage)

#Guess of a model w/just unitary leakage
mdl_3level_guess = mdl_3level_ideal.copy()
# Leakage is applied only to the idle gate here (Gx/Gy variants left
# commented out as alternatives).
mdl_3level_guess['Gi'] = np.dot(leakageOp, mdl_3level_guess['Gi'])
#mdl_3level_guess['Gx'] = np.dot(leakageOp, mdl_3level_guess['Gx'])
#mdl_3level_guess['Gy'] = np.dot(leakageOp, mdl_3level_guess['Gy'])

#Actual model used for data generation (some depolarization too)
mdl_3level_noisy = mdl_3level_ideal.depolarize(op_noise=0.005, spam_noise=0.01)
mdl_3level_noisy['Gi'] = np.dot(leakageOp, mdl_3level_noisy['Gi'])
#mdl_3level_noisy['Gx'] = np.dot(leakageOp, mdl_3level_noisy['Gx'])
#mdl_3level_noisy['Gy'] = np.dot(leakageOp, mdl_3level_noisy['Gy'])
#print(mdl_3level_guess)
# get sequences using expected model
# Fiducial generation is expensive, so it is done once and cached to disk;
# flip to True to regenerate the fiducial files.
generate_fiducials = False
if generate_fiducials:
    prepfids, measfids = pygsti.algorithms.generate_fiducials(
        mdl_3level_guess, omitIdentity=False, maxFidLength=4, verbosity=4)
    pygsti.io.write_circuit_list("example_files/leakage_prepfids.txt", prepfids)
    pygsti.io.write_circuit_list("example_files/leakage_measfids.txt", measfids)

# Load the (cached) preparation and measurement fiducials.
prepfids = pygsti.io.load_circuit_list("example_files/leakage_prepfids.txt")
measfids = pygsti.io.load_circuit_list("example_files/leakage_measfids.txt")
germs = std1Q.germs
maxLengths = [1,]  # single germ-power length; keeps this example fast
expList = pc.make_lsgst_experiment_list(mdl_3level_noisy, prepfids, measfids, germs, maxLengths)
# Simulate data from the noisy 3-level model (1000 samples/circuit, fixed seed).
ds = pc.generate_fake_data(mdl_3level_noisy, expList, 1000, 'binomial', seed=1234)

# Fit the same data with a 2-level model and with the 3-level model.
results_2level = pygsti.do_stdpractice_gst(ds, mdl_2level_ideal, prepfids, measfids,
                                           germs, maxLengths, modes="CPTP", verbosity=3)
# 'True' mode tests the actual data-generating model against the data.
results_3level = pygsti.do_stdpractice_gst(ds, mdl_3level_ideal, prepfids, measfids,
                                           germs, maxLengths, modes="CPTP,True",
                                           modelsToTest={'True': mdl_3level_noisy},
                                           verbosity=4, advancedOptions={'all': {'tolerance': 1e-2}})
-- Std Practice: Iter 1 of 1 (CPTP) --: --- Circuit Creation --- 305 sequences created Dataset has 305 entries: 305 utilized, 0 requested sequences were missing --- Iterative MLGST: Iter 1 of 1 305 operation sequences ---: --- Minimum Chi^2 GST --- Sum of Chi^2 = 794.557 (305 data params - 31 model params = expected mean of 274; p-value = 0) Completed in 5.4s 2*Delta(log(L)) = 804.647 Iteration 1 took 5.4s Switching to ML objective (last iteration) --- MLGST --- Maximum log(L) = 395.179 below upper bound of -458095 2*Delta(log(L)) = 790.358 (305 data params - 31 model params = expected mean of 274; p-value = 0) Completed in 1.1s 2*Delta(log(L)) = 790.358 Final MLGST took 1.1s Iterative MLGST Total Time: 6.5s --- Re-optimizing logl after robust data scaling --- --- MLGST --- Maximum log(L) = 395.179 below upper bound of -458095 2*Delta(log(L)) = 790.357 (305 data params - 31 model params = expected mean of 274; p-value = 0) Completed in 0.5s -- Performing 'single' gauge optimization on CPTP estimate -- -- Performing 'single' gauge optimization on CPTP.Robust+ estimate -- -- Std Practice: Iter 1 of 2 (CPTP) --: --- Circuit Creation --- 305 sequences created Dataset has 305 entries: 305 utilized, 0 requested sequences were missing --- Iterative MLGST: Iter 1 of 1 305 operation sequences ---: --- Minimum Chi^2 GST --- Created evaluation tree with 1 subtrees. Will divide 1 procs into 1 (subtree-processing) groups of ~1 procs each, to distribute over 360 params (taken as 1 param groups of ~360 params). 
--- Outer Iter 0: norm_f = 1.57682e+06, mu=0, |J|=2278.38 --- Outer Iter 1: norm_f = 43698, mu=428.568, |J|=14254 --- Outer Iter 2: norm_f = 5115.05, mu=194.529, |J|=2388.29 --- Outer Iter 3: norm_f = 4310.77, mu=250.155, |J|=2062.3 --- Outer Iter 4: norm_f = 626.091, mu=83.385, |J|=2319.04 --- Outer Iter 5: norm_f = 342.581, mu=526.228, |J|=2351.88 --- Outer Iter 6: norm_f = 276.286, mu=216.687, |J|=2390.09 --- Outer Iter 7: norm_f = 271.075, mu=863.721, |J|=2399.36 Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 0.01 Sum of Chi^2 = 271.075 (305 data params - 161 model params = expected mean of 144; p-value = 7.91906e-10) Completed in 17.6s 2*Delta(log(L)) = 273.05 Iteration 1 took 17.6s Switching to ML objective (last iteration) --- MLGST --- --- Outer Iter 0: norm_f = 136.525, mu=0, |J|=1697.85 Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 0.01 Maximum log(L) = 136.525 below upper bound of -458095 2*Delta(log(L)) = 273.05 (305 data params - 161 model params = expected mean of 144; p-value = 4.90231e-10) Completed in 1.3s 2*Delta(log(L)) = 273.05 Final MLGST took 1.3s Iterative MLGST Total Time: 18.9s --- Re-optimizing logl after robust data scaling ---
WARNING: MLGST failed to improve logl: retaining chi2-objective estimate /Users/enielse/research/pyGSTi/packages/pygsti/objects/estimate.py:531: UserWarning: Max-model params (305) <= model params (360)! Using k == 1.
--- MLGST --- --- Outer Iter 0: norm_f = 136.525, mu=0, |J|=1697.85 Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 0.01 Maximum log(L) = 136.525 below upper bound of -458095 2*Delta(log(L)) = 273.05 (305 data params - 161 model params = expected mean of 144; p-value = 4.90231e-10) Completed in 1.2s -- Performing 'single' gauge optimization on CPTP estimate -- -- Adding Gauge Optimized (single) -- -- Conveying 'single' gauge optimization to CPTP.Robust+ estimate -- -- Adding Gauge Optimized (single) -- -- Std Practice: Iter 2 of 2 (True) --: --- Circuit Creation --- 305 sequences created Dataset has 305 entries: 305 utilized, 0 requested sequences were missing -- Performing 'single' gauge optimization on True estimate -- -- Adding Gauge Optimized (single) --
# Generate an HTML report comparing the 2-level and 3-level fits side by side.
pygsti.report.create_standard_report({'two-level': results_2level, 'three-level': results_3level},
                                     "example_files/leakage_report", "Leakage Example Report")
*** Creating workspace *** *** Generating switchboard ***
/Users/enielse/research/pyGSTi/packages/pygsti/report/factory.py:785: UserWarning: Idle tomography failed: Label{layers}
Found standard clifford compilation from std1Q_XYI *** Generating tables *** *** Generating plots *** Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. *** Merging into template file *** Output written to example_files/leakage_report directory *** Report Generation Complete! Total time 32.8807s ***
<pygsti.report.workspace.Workspace at 0x11d4c2518>
#try a different basis:
gm_basis = pygsti.obj.Basis('gm',3)
# Recombine the nine 3x3 Gell-Mann matrices into a "leakage basis" whose
# elements (per the labels below) group into: qubit-block components
# (I,X,Y,Z), qubit<->leakage coupling components (LX0,LX1,LY0,LY1), and a
# leakage-population component (L).
leakage_basis_mxs = [ np.sqrt(2)/3*(np.sqrt(3)*gm_basis[0] + 0.5*np.sqrt(6)*gm_basis[8]),
                      gm_basis[1], gm_basis[4], gm_basis[7],
                      gm_basis[2], gm_basis[3], gm_basis[5], gm_basis[6],
                      1/3*(np.sqrt(3)*gm_basis[0] - np.sqrt(6)*gm_basis[8]) ]
#for mx in leakage_basis_mxs:
#    pygsti.tools.print_mx(mx)

# Sanity check: the new basis is orthonormal under the trace inner product,
# i.e. Tr(m_i m_j) == delta_ij.
check = np.zeros( (9,9), complex)
for i,m1 in enumerate(leakage_basis_mxs):
    for j,m2 in enumerate(leakage_basis_mxs):
        check[i,j] = np.trace(np.dot(m1,m2))
assert(np.allclose(check, np.identity(9,complex)))

leakage_basis = pygsti.obj.Basis(name="LeakageBasis", matrices=leakage_basis_mxs,
                                 longname="2+1 level leakage basis", real=True,
                                 labels=['I','X','Y','Z','LX0','LX1','LY0','LY1','L'])
def changebasis_3level_model(mdl):
    """Return a copy of `mdl` with its prep, POVM effects, and operations
    converted from `gm_basis` to `leakage_basis` (both module-level globals).
    """
    converted = mdl.copy()
    # State preparation vector.
    converted.preps['rho0'] = pygsti.obj.FullSPAMVec(
        pygsti.tools.change_basis(mdl.preps['rho0'].todense(), gm_basis, leakage_basis))
    # Measurement effects, rebuilt as an unconstrained POVM.
    effect_pairs = [(outcome,
                     pygsti.tools.change_basis(mdl.povms['Mdefault'][outcome].todense(),
                                               gm_basis, leakage_basis))
                    for outcome in ('0', '1')]
    converted.povms['Mdefault'] = pygsti.obj.UnconstrainedPOVM(effect_pairs)
    # Every gate superoperator.
    for gate_lbl, gate_op in mdl.operations.items():
        converted.operations[gate_lbl] = pygsti.obj.FullDenseOp(
            pygsti.tools.change_basis(gate_op.todense(), gm_basis, leakage_basis))
    converted.basis = leakage_basis
    return converted
def changebasis_3level_results(results):
    """Return a copy of a GST `results` object with every model in every
    estimate converted to the leakage basis via `changebasis_3level_model`.
    """
    updated = results.copy()
    for est_label, estimate in results.estimates.items():
        target_models = updated.estimates[est_label].models
        for mdl_label, mdl in estimate.models.items():
            if isinstance(mdl, (list, tuple)):  # assume a list/tuple of models
                target_models[mdl_label] = [changebasis_3level_model(m) for m in mdl]
            else:
                target_models[mdl_label] = changebasis_3level_model(mdl)
    return updated
# Regenerate the report with the 3-level results expressed in the leakage
# basis, which makes the leakage components easier to read off.
results_3level_leakage_basis = changebasis_3level_results( results_3level )
pygsti.report.create_standard_report({'two-level': results_2level, 'three-level': results_3level_leakage_basis},
                                     "example_files/leakage_report", "Leakage Example Report")
#advancedOptions={'autosize': 'none'})
*** Creating workspace *** *** Generating switchboard *** Found standard clifford compilation from std1Q_XYI
/Users/enielse/research/pyGSTi/packages/pygsti/report/factory.py:785: UserWarning: Idle tomography failed: Label{layers}
*** Generating tables *** *** Generating plots *** Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. *** Merging into template file *** Output written to example_files/leakage_report directory *** Report Generation Complete! Total time 30.2682s ***
<pygsti.report.workspace.Workspace at 0x121141630>
Open the generated report at example_files/leakage_report/main.html
# use "kite" density-matrix structure
def to_2plus1_superop(superop_2level):
    """Embed a 4x4 (2-level, gm-basis) superoperator into a 5x5 "2+1" one.

    The fifth dimension tracks the classical leakage-level population,
    which the embedded gate leaves exactly where it is.
    """
    expanded = np.zeros((5, 5), 'd')
    expanded[:4, :4] = superop_2level
    expanded[4, 4] = 1.0  # leave leakage population where it is
    return expanded
#Tack on a single extra "0" for the 5-th dimension corresponding
# to the classical leakage level population.
rho0 = np.concatenate( (mdl_2level_ideal.preps['rho0'],[[0]]), axis=0)
E0 = np.concatenate( (mdl_2level_ideal.povms['Mdefault']['0'],[[0]]), axis=0)
E1 = np.concatenate( (mdl_2level_ideal.povms['Mdefault']['1'],[[0]]), axis=0)

# State space = a 2-dim qubit block plus a 1-dim classical leakage block.
sslbls = pygsti.obj.StateSpaceLabels([('Qubit',),('Leakage',)],[(2,),(1,)])
mdl_2plus1_ideal = pygsti.obj.ExplicitOpModel(sslbls, 'gm')
mdl_2plus1_ideal['rho0'] = rho0
mdl_2plus1_ideal['Mdefault'] = pygsti.obj.UnconstrainedPOVM([('0',E0),('1',E1)])
mdl_2plus1_ideal['Gi'] = to_2plus1_superop(mdl_2level_ideal['Gi'])
# BUGFIX: Gx and Gy were both built from mdl_2level_ideal['Gi'] (copy-paste
# error), making every "ideal" gate an identity. Embed the actual X and Y
# pi/2 gates instead.
mdl_2plus1_ideal['Gx'] = to_2plus1_superop(mdl_2level_ideal['Gx'])
mdl_2plus1_ideal['Gy'] = to_2plus1_superop(mdl_2level_ideal['Gy'])

# Fit the simulated data with the 2+1 "kite"-structured model; start the
# optimization from the target model rather than an LGST seed.
results_2plus1 = pygsti.do_long_sequence_gst(ds, mdl_2plus1_ideal, prepfids, measfids,
                                             germs, maxLengths, verbosity=3,
                                             advancedOptions={"starting point": "target"})
--- Circuit Creation --- 305 sequences created Dataset has 305 entries: 305 utilized, 0 requested sequences were missing --- Iterative MLGST: Iter 1 of 1 305 operation sequences ---: --- Minimum Chi^2 GST --- Created evaluation tree with 1 subtrees. Will divide 1 procs into 1 (subtree-processing) groups of ~1 procs each, to distribute over 90 params (taken as 1 param groups of ~90 params). --- Outer Iter 0: norm_f = 1.24364e+09, mu=0, |J|=255630 --- Outer Iter 1: norm_f = 118724, mu=2.46191e+06, |J|=4148.05 --- Outer Iter 2: norm_f = 84982, mu=820638, |J|=3690.92 --- Outer Iter 3: norm_f = 77529.7, mu=273546, |J|=3614.72 --- Outer Iter 4: norm_f = 76388.2, mu=91182, |J|=3594.1 --- Outer Iter 5: norm_f = 74758.6, mu=30394, |J|=3564.1 --- Outer Iter 6: norm_f = 73202.6, mu=15306.2, |J|=3596.98 --- Outer Iter 7: norm_f = 72412.3, mu=5102.08, |J|=3791.87 --- Outer Iter 8: norm_f = 71574.8, mu=1700.69, |J|=4422.36 --- Outer Iter 9: norm_f = 71115.3, mu=680.571, |J|=5558.64 --- Outer Iter 10: norm_f = 70928.1, mu=226.857, |J|=6091.95 --- Outer Iter 11: norm_f = 70903.7, mu=75.619, |J|=6573.42 --- Outer Iter 12: norm_f = 70612.8, mu=50.4127, |J|=6710.9 --- Outer Iter 13: norm_f = 69398.7, mu=1075.47, |J|=6299.2 --- Outer Iter 14: norm_f = 68306.2, mu=779.583, |J|=4824.62 --- Outer Iter 15: norm_f = 67823.2, mu=6261.97, |J|=4949.68 --- Outer Iter 16: norm_f = 67490.5, mu=6697.25, |J|=4896.8 --- Outer Iter 17: norm_f = 67162.3, mu=7139.91, |J|=4928.2 --- Outer Iter 18: norm_f = 66605.7, mu=7153.15, |J|=5157.1 --- Outer Iter 19: norm_f = 66243.5, mu=8473.51, |J|=5910.77 --- Outer Iter 20: norm_f = 65658.5, mu=9095.27, |J|=6328.83 --- Outer Iter 21: norm_f = 65030.6, mu=18792.6, |J|=6937.75 --- Outer Iter 22: norm_f = 64421.3, mu=18767.7, |J|=7183.83 --- Outer Iter 23: norm_f = 64070.1, mu=18763.1, |J|=7902.07 --- Outer Iter 24: norm_f = 63719.8, mu=14475.1, |J|=8422.75 --- Outer Iter 25: norm_f = 63366, mu=10494.3, |J|=9939.72 --- Outer Iter 26: norm_f = 62945.6, mu=3498.1, 
|J|=11640.4 --- Outer Iter 27: norm_f = 62352.3, mu=1424.59, |J|=12533.2 --- Outer Iter 28: norm_f = 62220.6, mu=1.55603e+07, |J|=12923.5 --- Outer Iter 29: norm_f = 62136.8, mu=1.29088e+07, |J|=18073.9 --- Outer Iter 30: norm_f = 62104.9, mu=4.30293e+06, |J|=16315.5 --- Outer Iter 31: norm_f = 62074.4, mu=1.43431e+06, |J|=16691.7 --- Outer Iter 32: norm_f = 62050.8, mu=478103, |J|=15755.5 --- Outer Iter 33: norm_f = 62027.9, mu=159368, |J|=16141.1 --- Outer Iter 34: norm_f = 61976, mu=53122.6, |J|=16197.6 --- Outer Iter 35: norm_f = 61841.2, mu=17707.5, |J|=16741.4 --- Outer Iter 36: norm_f = 61524.5, mu=5902.51, |J|=17371.9 --- Outer Iter 37: norm_f = 60847.7, mu=1967.5, |J|=18946.7 --- Outer Iter 38: norm_f = 60510.2, mu=15760.6, |J|=12102.6 --- Outer Iter 39: norm_f = 58570.2, mu=5.3796e+06, |J|=20510.2 --- Outer Iter 40: norm_f = 58548.5, mu=7.05875e+06, |J|=97573.5 --- Outer Iter 41: norm_f = 58400.8, mu=5.43615e+06, |J|=33072.7 --- Outer Iter 42: norm_f = 58323.5, mu=1.81546e+06, |J|=24674.7 --- Outer Iter 43: norm_f = 58135, mu=605155, |J|=25141.6 --- Outer Iter 44: norm_f = 57594.7, mu=201718, |J|=21948.8 --- Outer Iter 45: norm_f = 56254.3, mu=67239.4, |J|=17377.8 --- Outer Iter 46: norm_f = 53890.6, mu=22413.1, |J|=17618.3 --- Outer Iter 47: norm_f = 51060.5, mu=7471.05, |J|=25556.8 --- Outer Iter 48: norm_f = 48553.9, mu=6814.6, |J|=18395.9 --- Outer Iter 49: norm_f = 48451.7, mu=7.44336e+07, |J|=20679.7 --- Outer Iter 50: norm_f = 48223.4, mu=2.48112e+07, |J|=20158 --- Outer Iter 51: norm_f = 47908.8, mu=8.59843e+06, |J|=31843.3 --- Outer Iter 52: norm_f = 47725, mu=2.86614e+06, |J|=24966.9 --- Outer Iter 53: norm_f = 47520, mu=955382, |J|=26381.3 --- Outer Iter 54: norm_f = 47319.8, mu=318461, |J|=25410.5 --- Outer Iter 55: norm_f = 47164.4, mu=106154, |J|=24379.4 --- Outer Iter 56: norm_f = 46907.9, mu=35384.5, |J|=23600 --- Outer Iter 57: norm_f = 46304.8, mu=11794.8, |J|=22068.3 --- Outer Iter 58: norm_f = 44969.6, mu=3931.61, |J|=24374.4 --- Outer 
Iter 59: norm_f = 43343.9, mu=2621.07, |J|=52276.5 --- Outer Iter 60: norm_f = 42469.2, mu=8445.74, |J|=89068.1 --- Outer Iter 61: norm_f = 41220.4, mu=5630.49, |J|=47371.5 --- Outer Iter 62: norm_f = 40610.6, mu=15014.6, |J|=30842 --- Outer Iter 63: norm_f = 39770.6, mu=10009.8, |J|=31737.1 --- Outer Iter 64: norm_f = 38576.5, mu=6673.18, |J|=135099 --- Outer Iter 65: norm_f = 37882.1, mu=17795.1, |J|=59527 --- Outer Iter 66: norm_f = 36924.8, mu=11863.4, |J|=36466.8 --- Outer Iter 67: norm_f = 36395.6, mu=14042.5, |J|=58030.4 --- Outer Iter 68: norm_f = 34909.2, mu=15422.7, |J|=41186.2 --- Outer Iter 69: norm_f = 25883.2, mu=5140.9, |J|=8838.31 --- Outer Iter 70: norm_f = 25821.8, mu=5.61524e+07, |J|=9415.83 --- Outer Iter 71: norm_f = 25710.6, mu=1.87175e+07, |J|=11055.2 --- Outer Iter 72: norm_f = 25611, mu=1.35695e+07, |J|=15154.1 --- Outer Iter 73: norm_f = 25536.2, mu=7.55276e+06, |J|=12694.6 --- Outer Iter 74: norm_f = 25474.4, mu=2.51759e+06, |J|=13694.4 --- Outer Iter 75: norm_f = 25372.1, mu=839196, |J|=13384.8 --- Outer Iter 76: norm_f = 25162.8, mu=279732, |J|=13840.3 --- Outer Iter 77: norm_f = 24687.1, mu=93244, |J|=13200.4 --- Outer Iter 78: norm_f = 23409.4, mu=31081.3, |J|=12234.2 --- Outer Iter 79: norm_f = 19735.1, mu=10360.4, |J|=9825.76 --- Outer Iter 80: norm_f = 14163.6, mu=6906.96, |J|=10453.7 --- Outer Iter 81: norm_f = 13865.5, mu=442087, |J|=93836.1 --- Outer Iter 82: norm_f = 13191.2, mu=222370, |J|=22834.9 --- Outer Iter 83: norm_f = 13107, mu=320192, |J|=13272.2 --- Outer Iter 84: norm_f = 12094.7, mu=106731, |J|=11792.7 --- Outer Iter 85: norm_f = 11559.9, mu=106382, |J|=12223.7 --- Outer Iter 86: norm_f = 10314, mu=35460.6, |J|=11506.8 --- Outer Iter 87: norm_f = 9209.45, mu=33416.7, |J|=12317.4 --- Outer Iter 88: norm_f = 7099.7, mu=11138.9, |J|=11237 --- Outer Iter 89: norm_f = 4308.38, mu=3712.97, |J|=11886.4 --- Outer Iter 90: norm_f = 3279.34, mu=9901.24, |J|=13309.3 --- Outer Iter 91: norm_f = 2332.54, mu=6600.83, |J|=11630.8 
--- Outer Iter 92: norm_f = 1419.22, mu=2438.53, |J|=11854.7 --- Outer Iter 93: norm_f = 996.859, mu=2229.78, |J|=12670.8 --- Outer Iter 94: norm_f = 751.65, mu=912.108, |J|=10595.4 --- Outer Iter 95: norm_f = 689.885, mu=304.036, |J|=10225.9 --- Outer Iter 96: norm_f = 665.816, mu=304.653, |J|=12151.2 --- Outer Iter 97: norm_f = 532.045, mu=170.053, |J|=12365.1 --- Outer Iter 98: norm_f = 443.769, mu=324.131, |J|=12753.9 --- Outer Iter 99: norm_f = 370.178, mu=136.528, |J|=10709.4 --- Outer Iter 100: norm_f = 349.476, mu=60.9215, |J|=10246.4 --- Outer Iter 101: norm_f = 343.463, mu=20.3072, |J|=10330.6 --- Outer Iter 102: norm_f = 342.654, mu=6.76906, |J|=10401.6 --- Outer Iter 103: norm_f = 342.501, mu=2.25635, |J|=10417.7 --- Outer Iter 104: norm_f = 342.491, mu=0.752118, |J|=10420.3 --- Outer Iter 105: norm_f = 342.49, mu=0.250706, |J|=10420.2 Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 1e-06 Sum of Chi^2 = 342.49 (305 data params - 65 model params = expected mean of 240; p-value = 1.52201e-05) Completed in 13.1s 2*Delta(log(L)) = 344.44 Iteration 1 took 13.1s Switching to ML objective (last iteration) --- MLGST --- --- Outer Iter 0: norm_f = 172.22, mu=0, |J|=7358.03 --- Outer Iter 1: norm_f = 171.499, mu=2396.3, |J|=7476.83 --- Outer Iter 2: norm_f = 171.485, mu=798.768, |J|=7474.31 --- Outer Iter 3: norm_f = 171.477, mu=266.256, |J|=7474.09 --- Outer Iter 4: norm_f = 171.472, mu=88.752, |J|=7473.33 --- Outer Iter 5: norm_f = 171.47, mu=29.584, |J|=7472.42 --- Outer Iter 6: norm_f = 171.467, mu=9.86134, |J|=7471.75 --- Outer Iter 7: norm_f = 171.465, mu=3.28711, |J|=7472.02 --- Outer Iter 8: norm_f = 171.465, mu=1.0957, |J|=7472.82 Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 1e-06 Maximum log(L) = 171.465 below upper bound of -458095 2*Delta(log(L)) = 342.929 (305 data params - 65 model params = expected mean of 240; p-value = 1.41955e-05) 
Completed in 1.1s 2*Delta(log(L)) = 342.929 Final MLGST took 1.1s Iterative MLGST Total Time: 14.2s -- Adding Gauge Optimized (go0) -- --- Re-optimizing logl after robust data scaling --- --- MLGST ---
/Users/enielse/research/pyGSTi/packages/pygsti/algorithms/gaugeopt.py:240: UserWarning: No gauge group specified, so no gauge optimization performed.
--- Outer Iter 0: norm_f = 171.465, mu=0, |J|=7472.82 Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 1e-06 Maximum log(L) = 171.465 below upper bound of -458095 2*Delta(log(L)) = 342.929 (305 data params - 65 model params = expected mean of 240; p-value = 1.41955e-05) Completed in 0.1s -- Adding Gauge Optimized (go0) --
# Final report comparing all three fits: plain 2-level, 3-level (leakage
# basis), and the 2+1 "kite" model.
pygsti.report.create_standard_report({'two-level': results_2level, 'three-level': results_3level_leakage_basis,
                                      'two+one level': results_2plus1},
                                     "example_files/leakage_report", "Leakage Example Report",
                                     advancedOptions={'autosize': 'none'})
*** Creating workspace *** *** Generating switchboard *** Found standard clifford compilation from std1Q_XYI
/Users/enielse/research/pyGSTi/packages/pygsti/report/factory.py:785: UserWarning: Idle tomography failed: Label{layers}
*** Generating tables ***
/Users/enielse/research/pyGSTi/packages/pygsti/extras/rb/theory.py:200: UserWarning: Output may be unreliable because the model is not approximately trace-preserving.
*** Generating plots *** Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance. *** Merging into template file *** Output written to example_files/leakage_report directory *** Report Generation Complete! Total time 43.5036s ***
<pygsti.report.workspace.Workspace at 0x1286e5198>
Open the generated report at example_files/leakage_report/main.html