!mpiexec
Microsoft MPI Startup Program [Version 10.1.12498.52] Launches an application on multiple hosts. Usage: mpiexec [options] executable [args] [ : [options] exe [args] : ... ] mpiexec -configfile <file name> Common options: -n <num_processes> -env <env_var_name> <env_var_value> -wdir <working_directory> -hosts n host1 [m1] host2 [m2] ... hostn [mn] -cores <num_cores_per_host> -lines -debug [0-3] -logfile <log file> Examples: mpiexec -n 4 pi.exe mpiexec -hosts 1 server1 master : -n 8 worker For a complete list of options, run mpiexec -help2 For a list of environment variables, run mpiexec -help3 You can reach the Microsoft MPI team via email at askmpi@microsoft.com
from mpi4py import MPI
--------------------------------------------------------------------------- ModuleNotFoundError Traceback (most recent call last) ~\AppData\Local\Temp\ipykernel_3112\4176240000.py in <cell line: 1>() ----> 1 from mpi4py import MPI ModuleNotFoundError: No module named 'mpi4py'
!pip install mpi4py
Defaulting to user installation because normal site-packages is not writeable Collecting mpi4py Downloading mpi4py-3.1.6-cp39-cp39-win_amd64.whl (475 kB) -------------------------------------- 475.9/475.9 kB 1.6 MB/s eta 0:00:00 Installing collected packages: mpi4py Successfully installed mpi4py-3.1.6
from mpi4py import MPI

# Report which processor this rank runs on and its position in COMM_WORLD.
# Run inside the notebook kernel this is a single-process world (0/1).
world = MPI.COMM_WORLD
pname = MPI.Get_processor_name()
size = world.Get_size()
rank = world.Get_rank()
print(f"Hello from {pname} - {rank}/{size}")
Hello from pf-cai-p03-pc0 - 0/1
%%writefile mpi01.py
from mpi4py import MPI

# Minimal MPI "hello world": every rank prints its host name and its
# rank/size within COMM_WORLD. Launch with: mpiexec python mpi01.py
world = MPI.COMM_WORLD
pname = MPI.Get_processor_name()
size = world.Get_size()
rank = world.Get_rank()
print(f"Hello from {pname} - {rank}/{size}")
Overwriting mpi01.py
!mpiexec python mpi01.py
Hello from pf-cai-p03-pc0 - 3/6 Hello from pf-cai-p03-pc0 - 4/6 Hello from pf-cai-p03-pc0 - 1/6 Hello from pf-cai-p03-pc0 - 2/6 Hello from pf-cai-p03-pc0 - 5/6 Hello from pf-cai-p03-pc0 - 0/6
%%writefile mpi02.py
from mpi4py import MPI
from time import sleep
from random import randrange

# Each rank announces itself, sleeps for a random 3-9 second interval,
# then reports completion — demonstrates that ranks run independently
# and finish in nondeterministic order.
comm = MPI.COMM_WORLD
pname = MPI.Get_processor_name()
size = comm.Get_size()
rank = comm.Get_rank()

print(f"{pname}: start {rank}/{size}")
n = randrange(3, 10)
print(f'{pname}: {n} seconds')
sleep(n)
print(f'{pname}: end')
Writing mpi02.py
!mpiexec python mpi02.py
pf-cai-p03-pc0: start 2/6 pf-cai-p03-pc0: 3 seconds pf-cai-p03-pc0: end pf-cai-p03-pc0: start 0/6 pf-cai-p03-pc0: 4 seconds pf-cai-p03-pc0: end pf-cai-p03-pc0: start 5/6 pf-cai-p03-pc0: 7 seconds pf-cai-p03-pc0: end pf-cai-p03-pc0: start 1/6 pf-cai-p03-pc0: 8 seconds pf-cai-p03-pc0: end pf-cai-p03-pc0: start 3/6 pf-cai-p03-pc0: 9 seconds pf-cai-p03-pc0: end pf-cai-p03-pc0: start 4/6 pf-cai-p03-pc0: 9 seconds pf-cai-p03-pc0: end
%%writefile mpi03.py
from mpi4py import MPI
from time import sleep
from random import randrange

# Broadcast a picklable Python object from rank 2 to every rank.
# Only the root's argument matters; the other ranks pass None and
# receive the root's dict from bcast()'s return value.
comm = MPI.COMM_WORLD
pname = MPI.Get_processor_name()
size = comm.Get_size()
rank = comm.Get_rank()

payload = {'pds': 7} if rank == 2 else None
data = comm.bcast(payload, root=2)
print(rank, data)
Overwriting mpi03.py
!mpiexec python mpi03.py
2 {'pds': 7} 0 {'pds': 7} 3 {'pds': 7} 4 {'pds': 7} 1 {'pds': 7} 5 {'pds': 7}
%%writefile mpi04.py
from mpi4py import MPI
from time import sleep
from random import randrange
import numpy as np

# Buffer-based broadcast: rank 3 fills a 2-element float64 array and
# the uppercase Bcast distributes it in place to every rank's buffer.
comm = MPI.COMM_WORLD
pname = MPI.Get_processor_name()
size = comm.Get_size()
rank = comm.Get_rank()

# two (real-valued) numbers
data = np.zeros(2)
if rank == 3:
    data[0] = 9
    data[1] = 10
comm.Bcast(data, root=3)
print(rank, data)
Writing mpi04.py
!mpiexec python mpi04.py
1 [ 9. 10.] 4 [ 9. 10.] 2 [ 9. 10.] 5 [ 9. 10.] 0 [ 9. 10.] 3 [ 9. 10.]
%%writefile mpi05.py
from mpi4py import MPI
from time import sleep
from random import randrange
import numpy as np

# Synchronize all ranks at a barrier, then broadcast one 32-bit integer
# from rank 1 using the buffer interface with an explicit MPI datatype.
comm = MPI.COMM_WORLD
pname = MPI.Get_processor_name()
size = comm.Get_size()
rank = comm.Get_rank()

# barrier — all ranks wait for each other here
comm.Barrier()
# a single whole number (int32 on every rank; root holds 8, others 0)
n = np.full(1, 8 if rank == 1 else 0, dtype='i4')
comm.Bcast((n, MPI.INT), root=1)
print(rank, n)
Writing mpi05.py
!mpiexec python mpi05.py
1 [8] 5 [8] 3 [8] 2 [8] 4 [8] 0 [8]
%%writefile mpi06.py
from mpi4py import MPI

# Sum-reduction demo: every rank contributes (rank + 1) and the result
# is collected at rank 0 (reduce's default root). Non-root ranks get
# None back from reduce().
#
# Example with 4 ranks:
#   rank:           0 | 1 | 2 | 3
#   value reduced:  1 | 2 | 3 | 4   ->  total at rank 0 is 10
#
# NOTE(review): removed unused imports (time.sleep, random.randrange,
# numpy) and the unused pname variable from the original version.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

total = comm.reduce(rank + 1, op=MPI.SUM)  # root=0 by default
if rank == 0:
    print(f'Total sum: {total}')
else:
    print(f'. {rank}:{total}')
Overwriting mpi06.py
!mpiexec python mpi06.py
. 5:None . 4:None . 2:None Total sum: 21 . 3:None . 1:None
Exercise: every process generates one random number; use reduce with op=MPI.MIN and root=1 so that rank 1 receives the minimum.