Skip to content

Commit 0155844

Browse files
committed
Nonblocking message chain exercise
1 parent ebe4c8f commit 0155844

File tree

7 files changed

+384
-25
lines changed

7 files changed

+384
-25
lines changed

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,6 @@ Exercise material and model answers for the CSC course
1717
- [Message chain with Sendrecv](message-chain-sendrecv/)
1818
- [Broadcast and scatter](broadcast-scatter)
1919
- [Collective operations](collectives/)
20-
- [Communicators and collectives](communicator)
20+
- [Non-blocking communication](message-chain-nonblocking/)
2121
- [(Bonus) Heat equation solver](heat-equation/)
2222

message-chain-nonblocking/README.md

+31
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
## Message chain
2+
3+
Write a program where every MPI task sends data to the next one.
4+
Let `ntasks` be the number of the tasks, and `myid` the rank of the
5+
current task. Your program should work as follows:
6+
7+
- Every task with a rank less than `ntasks-1` sends a message to task
8+
`myid+1`. For example, task 0 sends a message to task 1.
9+
- The message content is an integer array where each element is initialised to
10+
`myid`.
11+
- The message tag is the receiver's rank.
12+
- The sender prints out the number of elements it sends and the tag it used.
13+
- All tasks with rank > 0 receive messages.
14+
- Each receiver prints out their `myid` and the first element in the
15+
received array.
16+
17+
1. Implement the program described above using non-blocking communication, *i.e.*
18+
`MPI_Isend`, `MPI_Irecv`, and `MPI_Wait`. Utilize
19+
`MPI_PROC_NULL` when treating the special cases of
20+
the first and the last task so that no individual `MPI_Send`s or
21+
`MPI_Recv`s are needed. You may start from scratch or use the skeleton code
22+
([cpp/skeleton.cpp](cpp/skeleton.cpp) or [fortran/skeleton.F90](fortran/skeleton.F90))
23+
as a starting point.
24+
25+
2. The skeleton code prints out the time spent in communication.
26+
Investigate the timings with different numbers of MPI tasks
27+
(e.g. 2, 4, 8, 16, ...). Compare the results to the implementation with
28+
[individual `MPI_Send`s and `MPI_Recv`'s](../message-chain/) and pay attention
29+
especially to rank 0. Can you explain the behaviour?
30+
31+
3. Write a version that uses `MPI_Waitall` instead of `MPI_Wait`s.
+77
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
#include <cstdio>
2+
#include <vector>
3+
#include <mpi.h>
4+
5+
void print_ordered(double t);
6+
7+
// Exercise skeleton: message chain with non-blocking communication.
// Each rank is meant to send `size` integers (all equal to its rank,
// tagged myid+1) to rank myid+1 and receive from rank myid-1, using
// MPI_PROC_NULL at the chain ends. The TODO sections are deliberately
// incomplete — the file does not compile until the student fills them in.
int main(int argc, char *argv[])
{
    int i, myid, ntasks;
    constexpr int size = 10000000;        // number of ints per message
    std::vector<int> message(size);       // send buffer
    std::vector<int> receiveBuffer(size); // receive buffer
    MPI_Status status;

    double t0, t1;                        // wall-clock timestamps around communication

    int source, destination;              // neighbour ranks in the chain

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    // Initialize message: every element carries the sender's rank
    for (i = 0; i < size; i++) {
        message[i] = myid;
    }

    // TODO: set source and destination ranks
    // Treat boundaries with MPI_PROC_NULL
    // (assignments intentionally left incomplete for the exercise)

    destination =

    source =

    // end TODO

    // Start measuring the time spent in communication
    MPI_Barrier(MPI_COMM_WORLD);
    t0 = MPI_Wtime();

    // TODO: Send messages

    printf("Sender: %d. Sent elements: %d. Tag: %d. Receiver: %d\n",
           myid, size, myid + 1, destination);

    // TODO: Receive messages

    printf("Receiver: %d. first element %d.\n",
           myid, receiveBuffer[0]);

    // Finalize measuring the time and print it out
    t1 = MPI_Wtime();
    MPI_Barrier(MPI_COMM_WORLD);
    fflush(stdout);

    print_ordered(t1 - t0);

    MPI_Finalize();
    return 0;
}
61+
62+
void print_ordered(double t)
63+
{
64+
int i, rank, ntasks;
65+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
66+
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
67+
68+
if (rank == 0) {
69+
printf("Time elapsed in rank %2d: %6.3f\n", rank, t);
70+
for (i = 1; i < ntasks; i++) {
71+
MPI_Recv(&t, 1, MPI_DOUBLE, i, 11, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
72+
printf("Time elapsed in rank %2d: %6.3f\n", i, t);
73+
}
74+
} else {
75+
MPI_Send(&t, 1, MPI_DOUBLE, 0, 11, MPI_COMM_WORLD);
76+
}
77+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
#include <cstdio>
2+
#include <vector>
3+
#include <mpi.h>
4+
5+
void print_ordered(double t);
6+
7+
int main(int argc, char *argv[])
8+
{
9+
int i, myid, ntasks;
10+
constexpr int size = 10000000;
11+
std::vector<int> message(size);
12+
std::vector<int> receiveBuffer(size);
13+
MPI_Status statuses[2];
14+
MPI_Request requests[2];
15+
16+
double t0, t1;
17+
18+
int source, destination;
19+
20+
MPI_Init(&argc, &argv);
21+
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
22+
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
23+
24+
// Initialize message
25+
for (i = 0; i < size; i++) {
26+
message[i] = myid;
27+
}
28+
29+
// Set source and destination ranks
30+
if (myid < ntasks - 1) {
31+
destination = myid + 1;
32+
} else {
33+
destination = MPI_PROC_NULL;
34+
}
35+
if (myid > 0) {
36+
source = myid - 1;
37+
} else {
38+
source = MPI_PROC_NULL;
39+
}
40+
41+
// Start measuring the time spent in communication
42+
MPI_Barrier(MPI_COMM_WORLD);
43+
t0 = MPI_Wtime();
44+
45+
// Send and receive messages
46+
MPI_Isend(message.data(), size, MPI_INT, destination, myid + 1,
47+
MPI_COMM_WORLD, &requests[0]);
48+
MPI_Irecv(receiveBuffer.data(), size, MPI_INT, source, MPI_ANY_TAG,
49+
MPI_COMM_WORLD, &requests[1]);
50+
MPI_Waitall(2, requests, statuses);
51+
printf("Sender: %d. Sent elements: %d. Tag: %d. Receiver: %d\n",
52+
myid, size, myid + 1, destination);
53+
printf("Receiver: %d. first element %d.\n",
54+
myid, receiveBuffer[0]);
55+
56+
// Finalize measuring the time and print it out
57+
t1 = MPI_Wtime();
58+
MPI_Barrier(MPI_COMM_WORLD);
59+
fflush(stdout);
60+
61+
print_ordered(t1 - t0);
62+
63+
MPI_Finalize();
64+
return 0;
65+
}
66+
67+
void print_ordered(double t)
68+
{
69+
int i, rank, ntasks;
70+
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
71+
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
72+
73+
if (rank == 0) {
74+
printf("Time elapsed in rank %2d: %6.3f\n", rank, t);
75+
for (i = 1; i < ntasks; i++) {
76+
MPI_Recv(&t, 1, MPI_DOUBLE, i, 11, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
77+
printf("Time elapsed in rank %2d: %6.3f\n", i, t);
78+
}
79+
} else {
80+
MPI_Send(&t, 1, MPI_DOUBLE, 0, 11, MPI_COMM_WORLD);
81+
}
82+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
program basic
  ! Exercise skeleton: message chain with non-blocking communication.
  ! Each rank should send `size` integers (all equal to its rank,
  ! tagged myid+1) to rank myid+1 and receive from rank myid-1, with
  ! MPI_PROC_NULL at the chain ends. The TODO sections are deliberately
  ! incomplete — the file does not compile until they are filled in.
  use mpi
  use iso_fortran_env, only : REAL64

  implicit none
  ! Number of integers in each message
  integer, parameter :: size = 10000000
  integer :: rc, myid, ntasks
  integer :: message(size)
  integer :: receiveBuffer(size)
  integer :: status(MPI_STATUS_SIZE)
  integer :: requests(2)

  ! Wall-clock timestamps around the communication phase
  real(REAL64) :: t0, t1

  ! Neighbour ranks in the chain
  integer :: source, destination

  call mpi_init(rc)
  call mpi_comm_rank(MPI_COMM_WORLD, myid, rc)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, rc)

  ! Every element of the send buffer carries the sender's rank
  message = myid

  ! TODO: set source and destination ranks
  ! Treat boundaries with MPI_PROC_NULL
  ! (assignments intentionally left incomplete for the exercise)

  destination =

  source =

  ! end TODO

  ! Start measuring the time spent in communication
  call mpi_barrier(mpi_comm_world, rc)
  t0 = mpi_wtime()

  ! TODO: Send messages

  write(*,'(A10,I3,A20,I8,A,I3,A,I3)') 'Sender: ', myid, &
       ' Sent elements: ',size, &
       '. Tag: ', myid+1, '. Receiver: ', destination

  ! TODO: Receive messages

  write(*,'(A10,I3,A,I3)') 'Receiver: ', myid, &
       ' First element: ', receiveBuffer(1)

  ! Finalize measuring the time and print it out
  t1 = mpi_wtime()
  call mpi_barrier(mpi_comm_world, rc)
  call flush(6)

  call print_ordered(t1 - t0)

  call mpi_finalize(rc)

contains

  subroutine print_ordered(t)
    ! Gather and print each rank's elapsed time in rank order:
    ! non-root ranks send t to rank 0, which receives and prints.
    implicit none
    real(REAL64) :: t

    integer i

    if (myid == 0) then
       write(*, '(A20, I3, A, F6.3)') 'Time elapsed in rank', myid, ':', t
       do i=1, ntasks-1
          call mpi_recv(t, 1, MPI_DOUBLE_PRECISION, i, 11, &
               MPI_COMM_WORLD, MPI_STATUS_IGNORE, rc)
          write(*, '(A20, I3, A, F6.3)') 'Time elapsed in rank', i, ':', t
       end do
    else
       call mpi_send(t, 1, MPI_DOUBLE_PRECISION, 0, 11, &
            MPI_COMM_WORLD, rc)
    end if
  end subroutine print_ordered

end program basic
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
program chain
  ! Message chain with non-blocking communication (model solution).
  ! Rank r posts an MPI_Irecv from rank r-1 and an MPI_Isend of `size`
  ! integers (tag r+1) to rank r+1; MPI_PROC_NULL at the chain ends
  ! makes the boundary operations no-ops. Communication time is
  ! measured between two barriers and reported rank by rank.
  use mpi
  use iso_fortran_env, only : REAL64

  implicit none
  integer, parameter :: size = 10000000
  integer :: rc, myid, ntasks
  integer :: message(size)
  integer :: receiveBuffer(size)
  integer :: status(MPI_STATUS_SIZE,2)

  real(REAL64) :: t0, t1

  integer :: source, destination
  integer :: count
  integer :: requests(2)

  call mpi_init(rc)
  call mpi_comm_rank(MPI_COMM_WORLD, myid, rc)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, rc)

  ! Every element of the send buffer carries the sender's rank
  message = myid

  ! Neighbour ranks; MPI_PROC_NULL handles the first and last task
  destination = merge(myid + 1, MPI_PROC_NULL, myid < ntasks - 1)
  source = merge(myid - 1, MPI_PROC_NULL, myid > 0)

  ! Start measuring the time spent in communication
  call mpi_barrier(mpi_comm_world, rc)
  t0 = mpi_wtime()

  ! Post the receive and the send without blocking
  call mpi_irecv(receiveBuffer, size, MPI_INTEGER, source, &
       MPI_ANY_TAG, MPI_COMM_WORLD, requests(1), rc)
  call mpi_isend(message, size, MPI_INTEGER, destination, &
       myid + 1, MPI_COMM_WORLD, requests(2), rc)

  ! Block until both operations have completed
  call mpi_waitall(2, requests, status, rc)

  ! Inspect the receive status for the number of elements that arrived
  call mpi_get_count(status(:,1), MPI_INTEGER, count, rc)
  write(*,'(A10,I3,A20,I8,A,I3,A,I3)') 'Sender: ', myid, &
       ' Sent elements: ', size, &
       '. Tag: ', myid + 1, &
       '. Receiver: ', destination
  write(*,'(A10,I3,A20,I8,A,I3,A,I3)') 'Receiver: ', myid, &
       'received elements: ', count, &
       '. Tag: ', status(MPI_TAG, 1), &
       '. Sender: ', status(MPI_SOURCE, 1)

  ! Finalize measuring the time and print it out
  t1 = mpi_wtime()
  call mpi_barrier(mpi_comm_world, rc)
  call flush(6)

  call print_ordered(t1 - t0)

  call mpi_finalize(rc)

contains

  subroutine print_ordered(t)
    ! Gather and print each rank's elapsed time in rank order:
    ! non-root ranks send t to rank 0, which receives and prints.
    implicit none
    real(REAL64) :: t

    integer i

    if (myid == 0) then
       write(*, '(A20, I3, A, F6.3)') 'Time elapsed in rank', myid, ':', t
       do i=1, ntasks-1
          call mpi_recv(t, 1, MPI_DOUBLE_PRECISION, i, 11, &
               MPI_COMM_WORLD, MPI_STATUS_IGNORE, rc)
          write(*, '(A20, I3, A, F6.3)') 'Time elapsed in rank', i, ':', t
       end do
    else
       call mpi_send(t, 1, MPI_DOUBLE_PRECISION, 0, 11, &
            MPI_COMM_WORLD, rc)
    end if
  end subroutine print_ordered

end program chain

0 commit comments

Comments
 (0)