#!/usr/bin/env python
# coding: utf-8

# # Pure Data Analysis
# 
# This tutorial covers different methods of analysing data *without* running GST. So far, there's only one, which checks for consistency between two (or more) datasets, called "Data Set Comparison".
# 
# ## Data Set Comparison
# This method declares that two or more `DataSet`s are "consistent" if the observed counts for the same gate strings across the data sets are all consistent with being generated by the same underlying gate set. This protocol can be used to test for, among other things, drift and crosstalk. It can also be used
# to compare an experimental dataset to an "ideal" dataset.

# In[1]:

from __future__ import division, print_function
import pygsti
import numpy as np
import scipy
from scipy import stats
from pygsti.construction import std1Q_XYI


# Let's first compare two `DataSet` objects where the underlying gate sets are the same. The data sets we'll use will be GST datasets (which allows us to do some nice visualization), but arbitrary datasets will work in general, provided that the gate sequences across the datasets are the same.

# In[2]:

#Let's make our underlying gate set have a little bit of random unitary noise.
gs_exp_0 = std1Q_XYI.gs_target.copy()
gs_exp_0 = gs_exp_0.randomize_with_unitary(.01,seed=0)


# In[3]:

germs = std1Q_XYI.germs
fiducials = std1Q_XYI.fiducials
max_lengths = [1,2,4,8,16,32,64,128,256]
gate_sequences = pygsti.construction.make_lsgst_experiment_list(std1Q_XYI.gates,fiducials,fiducials,germs,max_lengths)


# In[4]:

#Generate the data for the two datasets, using the same gate set, with 100 repetitions of each sequence.
N=100
DS_0 = pygsti.construction.generate_fake_data(gs_exp_0,gate_sequences,N,'binomial',seed=10)
DS_1 = pygsti.construction.generate_fake_data(gs_exp_0,gate_sequences,N,'binomial',seed=20)


# In[5]:

#Let's compare the two datasets.
comparator_0_1 = pygsti.objects.DataComparator([DS_0,DS_1])


# In[6]:

#Let's get the report from the comparator.
comparator_0_1.report(confidence_level=0.95)


# In[7]:

#Create a workspace to show plots
w = pygsti.report.Workspace()
w.init_notebook_mode(connected=False, autodisplay=True)


# In[8]:

#As we expect, the datasets are consistent!
#We can also visualize this in a few ways:

#This will show a histogram of the p-values associated with the different strings.
#If the null hypothesis (that the underlying gate sets are the same) is true,
#then we expect the distribution to roughly follow the dotted green line.
w.DatasetComparisonHistogramPlot(comparator_0_1, log=True, display='pvalue')


# In[9]:

#Color box plot comparing two datasets generated from the same gate set
gssList = pygsti.construction.make_lsgst_structs(std1Q_XYI.gates, fiducials, fiducials, germs, max_lengths)
w.ColorBoxPlot('dscmp', gssList[-1], None, None, dscomparator=comparator_0_1)
#A lack of green boxes indicates consistency between datasets!


# In[10]:

#Now let's generate data from two similar but not identical gate sets and see if our tests can detect the difference.


# In[11]:

gs_exp_1 = std1Q_XYI.gs_target.copy()
gs_exp_1 = gs_exp_1.randomize_with_unitary(.01,seed=1)


# In[12]:

DS_2 = pygsti.construction.generate_fake_data(gs_exp_1,gate_sequences,N,'binomial',seed=30)


# In[13]:

#Let's make the comparator and get the report.
comparator_1_2 = pygsti.objects.DataComparator([DS_1,DS_2])
comparator_1_2.report(confidence_level=0.95)
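# Before plotting, it can help to build some intuition for what the comparator's per-sequence p-values mean. The cell below is a minimal sketch (not the exact statistic `DataComparator` uses) that runs a standard two-sample contingency test on *hypothetical* counts for a single sequence; the numbers are made up rather than taken from `DS_1` or `DS_2`.

# In[ ]:

#Hypothetical outcome counts for one sequence: 52 of 100 'plus' outcomes in one dataset, 70 of 100 in the other.
counts_A = [52, 48]  # [plus, minus] counts in dataset A (hypothetical)
counts_B = [70, 30]  # [plus, minus] counts in dataset B (hypothetical)

#A 2x2 contingency-table test asks whether both rows could plausibly come from the same outcome probability.
chi2, pvalue, dof, expected = stats.chi2_contingency([counts_A, counts_B])
print("p-value for this single (hypothetical) sequence:", pvalue)

#A p-value is computed for every sequence, so a handful of small p-values is expected by chance alone;
#judging consistency requires looking at the whole collection of p-values, as the comparator's report does.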
# In[14]:

#The datasets are significantly inconsistent!
#Let's see what the distribution of p-values looks like now:
w.DatasetComparisonHistogramPlot(comparator_1_2)


# In[15]:

w.ColorBoxPlot('dscmp', gssList[-1], None, None, dscomparator=comparator_1_2)
#comparator_1_2.box_plot(germs,fiducials,fiducials,max_lengths,.05,0,'Color box plot comparing two datasets from same gateset')
#The red boxes indicate inconsistency between datasets!


# In[16]:

#While we only look at gate sets with Markovian, unitary errors here, this protocol can also be used when the
#error is neither unitary nor Markovian.
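# The introduction also mentions comparing an experimental dataset to an "ideal" dataset. The cell below is a minimal sketch of that use case, reusing only functions already shown above: generate noise-free data directly from the target gate set and compare it to the noisy `DS_0` with the same `DataComparator` / `report` pattern. (The seed here is an arbitrary choice.)

# In[ ]:

#Generate "ideal" data from the noise-free target gate set and compare it against DS_0.
DS_ideal = pygsti.construction.generate_fake_data(std1Q_XYI.gs_target,gate_sequences,N,'binomial',seed=40)
comparator_ideal_0 = pygsti.objects.DataComparator([DS_ideal,DS_0])
comparator_ideal_0.report(confidence_level=0.95)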