#!/usr/bin/env python
# coding: utf-8

# # Simple usage of a set of MPI engines
#
# This example assumes you've started a cluster of N engines (4 in this example) as part
# of an MPI world.
#
# Our documentation describes [how to create an MPI profile](http://ipython.org/ipython-doc/dev/parallel/parallel_process.html#using-ipcluster-in-mpiexec-mpirun-mode)
# and explains [basic MPI usage of the IPython cluster](http://ipython.org/ipython-doc/dev/parallel/parallel_mpi.html).
#
# For the simplest possible way to start 4 engines that belong to the same MPI world,
# you can run this in a terminal:
#
#     ipcluster start --engines=MPI -n 4
#
# or start an MPI cluster from the cluster tab if you have one configured.
#
# Once the cluster is running, we can connect to it and open a view into it:

# In[1]:

# NOTE(review): `IPython.parallel` was split out into the `ipyparallel` package
# in IPython 4; on modern installs this import is `from ipyparallel import Client`.
from IPython.parallel import Client

c = Client()
# A DirectView over every engine in the cluster.
view = c[:]

# Let's define a simple function that gets the MPI rank from each engine.

# In[2]:

@view.remote(block=True)
def mpi_rank():
    """Return this engine's rank in the MPI world (runs remotely on each engine)."""
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    return comm.Get_rank()

# In[3]:

mpi_rank()

# To get a mapping of IPython IDs and MPI rank (these do not always match),
# you can use the get_dict method on AsyncResults.

# In[4]:

mpi_rank.block = False
ar = mpi_rank()
ar.get_dict()

# With %%px cell magic, the next cell will actually execute *entirely on each engine*:

# In[5]:

get_ipython().run_cell_magic('px', '', "from mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\nif rank == 0:\n data = [(i+1)**2 for i in range(size)]\nelse:\n data = None\ndata = comm.scatter(data, root=0)\n\nassert data == (rank+1)**2, 'data=%s, rank=%s' % (data, rank)\n")

# In[6]:

# Pull the scattered `data` value back from every engine.
view['data']

# In[6]: