====== Mpi4py for Farber ======

MPI for Python ([[https://mpi4py.readthedocs.io/|mpi4py]]) provides Python bindings for the Message Passing Interface (MPI) standard, letting Python programs run across multiple processors. Use ''vpkg_versions'' to see which versions are available on Farber:
<code bash>
$ vpkg_versions python-mpi4py

Available versions in package (* = default version):

[/...]
python-mpi4py
* 1.3.1-python2.7.8
  1.3.1-python3.2.5
  3.0.3-python3.6.3
  python2.7.8
  python3.2.5
  python3.6.3
</code>
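
Once a version is loaded with ''vpkg_require'', you can quickly confirm that mpi4py imports and that every MPI rank is visible. The short test below is our own illustration; the file name ''check-mpi4py.py'' is not part of this page:

<code python check-mpi4py.py>
#
# Minimal mpi4py sanity check (illustrative only).
# Each rank reports its rank, the communicator size, and its host name.
#
from mpi4py import MPI

comm = MPI.COMM_WORLD
print("rank %d of %d on %s" % (comm.rank, comm.size, MPI.Get_processor_name()))
</code>

When started under ''mpirun'' on 4 slots you should see four lines; a single ''rank 0 of 1'' line usually means the script was launched with plain ''python'' instead of ''mpirun''.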
===== Sample mpi4py script =====

Adapted from the documentation provided by [[https://...|mpi4py]].
<code python scatter-gather.py>
#
# Loaded Modules
#
import numpy as np
from mpi4py import MPI

#
# Communicator
#
comm = MPI.COMM_WORLD

my_N = 4
N = my_N * comm.size

if comm.rank == 0:
    A = np.arange(N, dtype=np.float64)
else:
    # Note that if I am not the root processor A is an empty array
    A = np.empty(N, dtype=np.float64)

my_A = np.empty(my_N, dtype=np.float64)

#
# Scatter data into my_A arrays
#
comm.Scatter([A, MPI.DOUBLE], [my_A, MPI.DOUBLE])

if comm.rank == 0:
    print "After Scatter:"

for r in xrange(comm.size):
    if comm.rank == r:
        print "[%d] %s" % (comm.rank, my_A)
    comm.Barrier()

#
# Everybody is multiplying by 2
#
my_A *= 2

#
# Allgather data into A again
#
comm.Allgather([my_A, MPI.DOUBLE], [A, MPI.DOUBLE])

if comm.rank == 0:
    print "After Allgather:"

for r in xrange(comm.size):
    if comm.rank == r:
        print "[%d] %s" % (comm.rank, A)
    comm.Barrier()
</code>
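
The capitalized ''Scatter'' and ''Allgather'' methods above operate on buffer-like objects such as NumPy arrays and map directly onto the fast MPI calls. mpi4py also offers lowercase variants (''scatter'', ''gather'', ''allgather'') that communicate arbitrary picklable Python objects; they are slower but convenient for small or irregular data. A minimal sketch, not part of the original example:

<code python scatter-lowercase.py>
#
# Lowercase, pickle-based collectives (illustrative sketch).
#
from mpi4py import MPI

comm = MPI.COMM_WORLD

# The root rank builds one Python object per rank; all others pass None.
if comm.rank == 0:
    data = [{"rank": r, "payload": r * 2} for r in range(comm.size)]
else:
    data = None

# scatter() delivers element r of the list to rank r.
my_data = comm.scatter(data, root=0)
print("[%d] received %s" % (comm.rank, my_data))

# allgather() returns a list holding every rank's object, on all ranks.
all_data = comm.allgather(my_data)
if comm.rank == 0:
    print("gathered: %s" % all_data)
</code>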
+ | |||
+ | ===== Batch job ===== | ||
+ | |||
+ | Any MPI job requires you to use '' | ||
+ | |||
+ | The best results on Farber have been found by using the // | ||
+ | |||
+ | <code bash> | ||
+ | cp / | ||
+ | </ | ||
+ | |||
+ | and modify it for your application. Make sure you read the comments in the job script to select the appropriate option specifically modify the '' | ||
+ | |||
+ | <code bash> | ||
+ | vpkg_require python-mpi4py/ | ||
+ | </ | ||
+ | |||
+ | Lastly, modify '' | ||
+ | |||
+ | < | ||
+ | MY_EXE=" | ||
+ | MY_EXE_ARGS=(" | ||
+ | </ | ||
+ | |||
+ | All the options for '' | ||
+ | |||
+ | <code bash> | ||
+ | workgroup -g // | ||
+ | </ | ||
+ | |||
+ | then simple submit your job using | ||
+ | |||
+ | <code bash> | ||
+ | qsub mympi4py.qs | ||
+ | </ | ||
+ | |||
+ | or | ||
+ | |||
+ | <code bash> | ||
+ | qsub -l exclusive=1 mympi4py.qs | ||
+ | </ | ||
+ | |||
+ | for [[abstract: | ||
+ | |||
+ | Remember if you want to specify more cores for '' | ||
+ | |||
+ | <code bash> | ||
+ | qsub -l exclusive=1 -l standby=1 mympi4py.qs | ||
+ | </ | ||
+ | |||
+ | ==== Output ==== | ||
+ | |||
+ | The following output is based on the Python 2 script '' | ||
+ | |||
<code bash>
[CGROUPS] UD Grid Engine cgroup setup commencing
[CGROUPS] WARNING: No OS-level core-binding can be made for mpi jobs
[CGROUPS] Setting 1073741824 bytes (vmem none bytes) on n039 (master)
[CGROUPS] ...
[CGROUPS] done.

Adding dependency `python/2.7.8` to your environment
Adding dependency `openmpi/1.8.2` to your environment
Adding package `python-mpi4py/1.3.1-python2.7.8` to your environment
Adding dependency `atlas/...` to your environment
Adding package `python-numpy/...` to your environment
GridEngine parameters:
  mpirun        = ...
  nhosts        = 1
  nproc         = 4
  executable    = python
  Open MPI vers = 1.8.2
  MPI flags     = --display-map --mca btl ^tcp
-- begin OPENMPI run --
 Data for JOB [64887,1] offset 0

 ========================   JOB MAP   ========================

 Data for node: n039    Num slots: 4    Max slots: 0    Num procs: 4
        Process OMPI jobid: [64887,1] App: 0 Process rank: 0
        Process OMPI jobid: [64887,1] App: 0 Process rank: 1
        Process OMPI jobid: [64887,1] App: 0 Process rank: 2
        Process OMPI jobid: [64887,1] App: 0 Process rank: 3

 =============================================================
[2] [  8.   9.  10.  11.]
After Scatter:
[0] [ 0.  1.  2.  3.]
[1] [ 4.  5.  6.  7.]
[3] [ 12.  13.  14.  15.]
[3] [  0.   2.   4.   6.   8.  10.  12.  14.  16.  18.  20.  22.  24.  26.  28.  30.]
[1] [  0.   2.   4.   6.   8.  10.  12.  14.  16.  18.  20.  22.  24.  26.  28.  30.]
After Allgather:
[0] [  0.   2.   4.   6.   8.  10.  12.  14.  16.  18.  20.  22.  24.  26.  28.  30.]
[2] [  0.   2.   4.   6.   8.  10.  12.  14.  16.  18.  20.  22.  24.  26.  28.  30.]
-- end OPENMPI run --
</code>
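
As the run above shows, per-rank ''print'' lines can interleave even with the barriers in place, because each rank owns its own output stream. When ordered output matters, one common pattern is to gather the data to rank 0 and print it there. This is a sketch of the idea, not part of the original script:

<code python ordered-print.py>
#
# Gather-to-root printing: output is emitted by rank 0 alone, so it
# appears in rank order (illustrative sketch).
#
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Stand-in per-rank data; any picklable object works here.
my_A = comm.rank * np.ones(4, dtype=np.float64)

# Lowercase gather() collects one object per rank at the root.
pieces = comm.gather(my_A, root=0)
if comm.rank == 0:
    for r, piece in enumerate(pieces):
        print("[%d] %s" % (r, piece))
</code>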
+ | |||
+ | ===== Recipes ===== | ||
+ | If you need to build a Python virtualenv based on a collection of Python modules including mpi4py, then you will need to follow this recipe to get a properly-integrated mpi4py module. | ||
+ | |||
+ | * [[technical: |