#!/usr/bin/env python # coding: utf-8 # # Running experiments on IBM Q Processors # This tutorial will demonstrate how to run an experiment on IBM Q Processors. To do so you will need [QisKit](https://qiskit.org/) installed and an [IBM Q account](https://quantum-computing.ibm.com/). # # This was last run with QisKit versions: qiskit.__qiskit_version__ = {'qiskit-terra': '0.25.3', 'qiskit': '0.44.3', 'qiskit-aer': None, 'qiskit-ignis': None, 'qiskit-ibmq-provider': '0.20.2', 'qiskit-nature': None, 'qiskit-finance': None, 'qiskit-optimization': None, 'qiskit-machine-learning': None} qiskit_ibm_provider.__version__ = '0.7.2' # In[7]: import pygsti from pygsti.extras.devices import ExperimentalDevice from pygsti.extras import ibmq from pygsti.processors import CliffordCompilationRules as CCR # In[ ]: from qiskit_ibm_provider import IBMProvider # ## Load your IBM Q access # First, load you IBM Q account, get your `provider` and select a device. To do this, follow IBM Q's instructions. # In[8]: # If your first time, you may need to initialize your account with your IBMQ API token #IBMProvider.save_account(token="") # In[3]: # You can use your own instance if you have different credentials #provider = IBMProvider(instance='ibm-q/open/main') # You can leave it blank to use the default for your account provider = IBMProvider() # In[4]: dev_name = 'ibm_lagos' backend = provider.get_backend(dev_name) # ## Make a ProcessorSpec for IBM Q's processor. # Next we create a ProcessorSpec for the device you're going to run on. This ProcessorSpec must also contain the details needed for creating the pyGSTi experiment design that you want to run, which you can tweak by varying the optional arguments to the `devices.create_processor_spec()` function. # # In `v0.9.12`, the `pygsti.extras.devices` module has been updated. You can still use the existing files in `pygsti.extras.devices` if you are offline, and thus may still want to add your own device files. 
# However, you can now also simply use the IBMQ backend to create an
# `ExperimentalDevice` which is compatible with ProcessorSpecs and Models.

# In[6]:

# Offline alternative: use the configuration files shipped in
# pygsti.extras.devices (legacy, and may not be up-to-date).
# NOTE(review): this was previously executed unconditionally and then
# immediately overwritten by the backend-based construction below — and it
# named a *different* device ('ibmq_bogota') than the 'ibm_lagos' backend
# selected above. Kept here, commented out, as the offline option.
#device = ExperimentalDevice.from_legacy_device('ibmq_bogota')

# In[5]:

# Use the active backend to pull the current device specification.
device = ExperimentalDevice.from_qiskit_backend(backend)

# In[ ]:

# Gate set: the 24 single-qubit Cliffords Gc0..Gc23 plus CNOT.
pspec = device.create_processor_spec(['Gc{}'.format(i) for i in range(24)] + ['Gcnot'])

# ## Create an ExperimentDesign
#
# Next we create an `ExperimentDesign` that specifies the circuits you want to
# run on that device. Here we create a very simple mirror circuit benchmarking
# experiment. We'll use randomized mirror circuits, constructed using a
# `MirrorRBDesign`.
#
# First we pick the circuit design parameters:

# In[ ]:

# Circuit design parameters.
depths = [0, 2, 4, 16, 32, 64]  # benchmark depths for the mirror circuits
circuits_per_shape = 20         # circuits sampled per (width, depth) pair

# Dict setting the circuit widths (# qubits) you want to probe, and the qubits
# you want to use at each width. You can use device.graph.edges() to make sure
# these are connected components.
qubit_lists = {
    1: [('Q0',)],
    2: [('Q0', 'Q1')],
    3: [('Q0', 'Q1', 'Q2')],
    4: [('Q0', 'Q1', 'Q2', 'Q3')],
}

widths = list(qubit_lists.keys())

# Quick estimate: only accurate when there is exactly one qubit list per width
# (as above); the exact count is printed next.
print('total circuits: {}'.format(circuits_per_shape*len(widths)*len(depths)))

# Exact count: sums over every qubit list at every width.
total_circuits = sum(len(qubit_lists[w]) * circuits_per_shape * len(depths)
                     for w in widths)
print('full total circuits: {}'.format(total_circuits))

# We'll use the `edgegrab` sampler, which requires specifying the expected
# number of two-qubit gates per random layer.
twoQmean = {w: w / 8 for w in widths}
if 1 in widths:
    twoQmean[1] = 0  # No two-qubit gates in one-qubit circuits.
compilations = {'absolute': CCR.create_standard(pspec, 'absolute', ('paulis', '1Qcliffords'), verbosity=0)} # In[ ]: edesigns_dict = {} edesign_index = 1 for w in widths: for qubits in qubit_lists[w]: sub_edesign = pygsti.protocols.MirrorRBDesign(pspec, depths, circuits_per_shape, qubit_labels=qubits, clifford_compilations=compilations, sampler='edgegrab', samplerargs=[twoQmean[w],]) edesigns_dict[str(edesign_index)] = sub_edesign edesign_index += 1 combined_edesign = pygsti.protocols.CombinedExperimentDesign(edesigns_dict) # ## Running on IBM Q # We're now ready to run on the IBM Q processor. We do this using an `IBMQExperiment` object, which # # First it converts pyGSTi circuits into jobs that can be submitted to IBM Q. **This step includes transpiling of the pyGSTi circuits into OpenQASM** (and then into QisKit objects). # In[ ]: exp = ibmq.IBMQExperiment(combined_edesign, pspec, circuits_per_batch=75, num_shots=1024) # We're now ready to submit this experiment to IBM Q. # In[ ]: exp.submit(backend) # You can then monitor the jobs. If get an error message, you can query the error using `exp['qjob'][i].error_message()` for batch `i`. # In[ ]: exp.monitor() # You can then grab the results, **Once you see that all the jobs are complete** (`.retrieve_results()` will just hang if the jobs have not yet completed). # In[ ]: exp.retrieve_results() # This `IBMQExperiment` object now contains the results of your experiment. It contains much of the information about exactly what was submitted to IBM Q, and raw results objects that IBM Q returned. # In[ ]: print(exp.keys()) # But, most importantly, it contains the data formatted into a pyGSTi `ProtocolData` object, which is the packaged-up data that pyGSTi analysis proctols use. # In[ ]: data = exp['data'] # We can write this data to disk, which saves the `ProtocolData` in the standard pyGSTi format. 
# It also pickles (or JSONs) up all of the additional information contained in
# the `IBMQExperiment` object, e.g., the job IDs, in a subfolder
# `ibmqexperiment`.

# In[ ]:

exp.write('test_ibmq_experiment')

# If you only want to load the `ProtocolData` you can do this using pyGSTi's
# standard `io` functions. We can also load the `IBMQExperiment` object, which
# will skip unpickling any objects when the unpickling fails (e.g., due to
# changes in `QisKit`).
#
# New in '0.9.12': IBM jobs are no longer pickle-able. Instead, they will be
# retrieved from the server. However, this requires the provider to be passed
# in at load time.

# In[ ]:

loaded_exp = ibmq.IBMQExperiment.from_dir('test_ibmq_experiment', provider)

# In[ ]:

# Now we can run as before.
loaded_exp.monitor()

# ## Analyzing the results
#
# Because `retrieve_results()` has formatted the data into a `ProtocolData`
# object, we can just hand this to the analysis protocol(s) that are designed
# for analyzing this type of data. Here we'll analyze this data using a
# standard RB curve-fitting analysis.

# In[ ]:

rb = pygsti.protocols.RandomizedBenchmarking(datatype='adjusted_success_probabilities', defaultfit='A-fixed')

# Run the RB analysis separately on each sub-design's data.
results = {}
for key in data.keys():
    results[key] = rb.run(data[key])

# In[ ]:

ws = pygsti.report.Workspace()
ws.init_notebook_mode(autodisplay=True)

# In[ ]:

# Display one RB decay plot per sub-design.
for i in data.keys():
    print(i)
    ws.RandomizedBenchmarkingPlot(results[i])

# In[ ]: