#!/usr/bin/env python
# Christoph Federrath
#
# Python MPI (mpi4py) demo; run in parallel via, e.g., "mpirun -np 4 python <this script>".

from cfpack.defaults import *
import cfpack as cfp
import numpy as np
import argparse
from mpi4py import MPI

# print function that prepends the MPI rank from which the print was initiated
# (note: 'print' here is cfpack's print, imported via cfpack.defaults, which supports no_prefix)
def print_mpi(string):
    comm = MPI.COMM_WORLD
    print("["+str(comm.Get_rank())+"] "+string, no_prefix=True)

# print function that only lets the master MPI rank (rank 0) print
def print_master(string):
    comm = MPI.COMM_WORLD
    if comm.Get_rank() == 0:
        print("["+str(comm.Get_rank())+"] "+string, no_prefix=True)


# ===== the following applies in case we are running this in script mode =====
if __name__ == "__main__":

    # parse script arguments
    parser = argparse.ArgumentParser(description='Python MPI (mpi4py) demo.')
    args = parser.parse_args()

    # start a new timer
    timer = cfp.timer('mpi4py test')

    # get MPI ranks
    comm = MPI.COMM_WORLD
    nPE = comm.Get_size()
    myPE = comm.Get_rank()
    print_master("Total number of MPI ranks = "+str(nPE))
    comm.Barrier()

    # define n and the local and global sums; these are 0-d numpy arrays,
    # so mpi4py can use its fast buffer-based Allreduce on them
    n = int(1e7)
    sum_local = np.array(0.0)
    sum_global = np.array(0.0)

    # === domain decomposition ===
    mod = n % nPE
    div = n // nPE
    if mod != 0:
        # round the chunk size up, so that nPE chunks of size div are
        # guaranteed to cover all n iterations; the last rank then simply
        # takes whatever remains
        div += 1
    print_master("domain decomposition mod, div = "+str(mod)+", "+str(div))
    my_start = myPE * div # loop start index
    my_end = (myPE+1) * div - 1 # loop end index
    # the last PE gets the rest (my_end = n means the global loop covers i = 0..n inclusive)
    if (myPE == nPE-1): my_end = n
    print_mpi("my_start = "+str(my_start)+", my_end = "+str(my_end))

    # loop over the local chunk of the global iteration range (inclusive of my_end)
    for i in range(my_start, my_end+1):
        sum_local += i
    print_mpi("sum_local = "+str(sum_local))
    comm.Barrier()

    # MPI collective communication (all reduce)
    comm.Allreduce(sum_local, sum_global, op=MPI.SUM)
    print_master("sum_global = "+str(sum_global))

    # let the timer report
    timer.report()
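
    # Optional sanity check (a small addition for illustration): across all
    # ranks the loop covers i = 0..n inclusive, so the reduced result should
    # equal the analytic sum n*(n+1)/2 (= 5.0000005e13 for n = 1e7). All
    # partial sums stay below 2^53, so the float64 accumulation is exact.
    if myPE == 0:
        expected = n * (n + 1) / 2.0
        assert np.isclose(float(sum_global), expected), \
            "sum_global does not match the analytic value n*(n+1)/2"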