! I/O routines for heat equation solver
module io
  use heat
  use mpi

contains

  ! Output routine, saves the temperature distribution as a png image
  ! Arguments:
  !   curr (type(field)): variable with the temperature data
  !   iter (integer): index of the time step
  !   parallel (type(parallel_data)): parallel decomposition data
  subroutine write_field(curr, iter, parallel)
    use pngwriter
    implicit none

    type(field), intent(in) :: curr
    integer, intent(in) :: iter
    type(parallel_data), intent(in) :: parallel

    character(len=85) :: filename
    integer :: stat
    real(dp), dimension(:,:), allocatable, target :: full_data
    integer :: p, ierr

    if (parallel%rank == 0) then
      allocate(full_data(curr%nx_full, curr%ny_full))
      ! Copy rank #0 data to the global array
      full_data(1:curr%nx, 1:curr%ny) = curr%data(1:curr%nx, 1:curr%ny)
      ! Receive data from other ranks
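      ! The domain is decomposed along the second (y) dimension, so the
      ! block of rank p maps to columns p*ny + 1 ... (p+1)*ny of full_data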
      do p = 1, parallel%size - 1
        call mpi_recv(full_data(1:curr%nx, p*curr%ny + 1:(p + 1)*curr%ny), &
             & curr%nx * curr%ny, MPI_DOUBLE_PRECISION, p, 22, &
             & MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
      end do
      write(filename, '(A5,I4.4,A4)') 'heat_', iter, '.png'
      stat = save_png(full_data, curr%nx_full, curr%ny_full, filename)
      deallocate(full_data)
    else
      ! Send the local data to rank #0
      call mpi_send(curr%data(1:curr%nx, 1:curr%ny), curr%nx * curr%ny, &
           & MPI_DOUBLE_PRECISION, 0, 22, MPI_COMM_WORLD, ierr)
    end if

  end subroutine write_field
  ! Reads the temperature distribution from an input file
  ! Arguments:
  !   field0 (type(field)): field variable that will store the read data
  !   filename (char): name of the input file
  ! Note that this version assumes the input data to be in C memory layout
  subroutine read_field(field0, filename, parallel)
    implicit none

    type(field), intent(out) :: field0
    character(len=*), intent(in) :: filename
    type(parallel_data), intent(out) :: parallel

    integer :: nx, ny, i, ierr
    character(len=2) :: dummy
    real(dp), dimension(:,:), allocatable :: full_data, inner_data

    open(10, file=filename)
    ! Read the header
    read(10, *) dummy, nx, ny

    call parallel_setup(parallel, nx, ny)
    call set_field_dimensions(field0, nx, ny, parallel)

    ! The arrays for the temperature field contain also a halo region
    allocate(field0%data(0:field0%nx+1, 0:field0%ny+1))
    allocate(inner_data(field0%nx, field0%ny))
    if (parallel%rank == 0) then
      allocate(full_data(nx, ny))
      ! Read the data row by row
      do i = 1, nx
        read(10, *) full_data(i, 1:ny)
      end do
    else
      ! Dummy allocation: mpi_scatter references the send buffer only on
      ! the root, but an unallocated array must not be passed as an
      ! actual argument
      allocate(full_data(1, 1))
    end if
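    ! With a send count of nx * field0%ny, every rank receives a contiguous
    ! block of field0%ny full columns, i.e. the same column-block layout
    ! that write_field gathers back to rank #0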
    call mpi_scatter(full_data, nx * field0%ny, MPI_DOUBLE_PRECISION, &
         & inner_data, nx * field0%ny, MPI_DOUBLE_PRECISION, 0, &
         & MPI_COMM_WORLD, ierr)

    ! Copy to the full array containing also the boundaries
    field0%data(1:field0%nx, 1:field0%ny) = inner_data(:,:)

    ! Set the boundary values
    field0%data(1:field0%nx, 0) = field0%data(1:field0%nx, 1)
    field0%data(1:field0%nx, field0%ny + 1) = field0%data(1:field0%nx, field0%ny)
    field0%data(0, 0:field0%ny + 1) = field0%data(1, 0:field0%ny + 1)
    field0%data(field0%nx + 1, 0:field0%ny + 1) = field0%data(field0%nx, 0:field0%ny + 1)

    close(10)
    deallocate(inner_data)
    deallocate(full_data)

  end subroutine read_field
  ! Write a restart checkpoint that contains the field dimensions, the
  ! current iteration number and the temperature field.
  subroutine write_restart(temp, parallel, iter)
    implicit none

    type(field), intent(in) :: temp
    type(parallel_data), intent(in) :: parallel
    integer, intent(in) :: iter

    ! TODO: open the checkpoint file CHECKPOINT
    ! TODO: rank 0 writes out a header that has three values:
    !       temp%nx_full
    !       temp%ny_full
    !       iter
    ! TODO: all processes write out their temperature data into the file,
    !       i.e. the temp%data array including the ghost layers.
    !       Use a collective write routine.
    ! TODO: close the file
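    ! One possible sketch of the steps above using MPI-IO; not the only
    ! solution. Assumed here (not given by the exercise): the checkpoint
    ! file is named 'HEAT_RESTART.dat', the header holds three 4-byte
    ! integers, and dp values are 8 bytes wide.
    integer :: fp, ierr
    integer, dimension(3) :: header
    integer(kind=MPI_OFFSET_KIND) :: disp

    call mpi_file_open(MPI_COMM_WORLD, 'HEAT_RESTART.dat', &
         & ior(MPI_MODE_CREATE, MPI_MODE_WRONLY), MPI_INFO_NULL, &
         & fp, ierr)

    ! Rank 0 writes the header at the start of the file
    if (parallel%rank == 0) then
      header = (/ temp%nx_full, temp%ny_full, iter /)
      call mpi_file_write_at(fp, 0_MPI_OFFSET_KIND, header, 3, &
           & MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
    end if

    ! Every rank writes its local block, ghost layers included, at an
    ! offset determined by its rank; the write itself is collective
    disp = 3 * 4 + int(parallel%rank, MPI_OFFSET_KIND) * &
         & (temp%nx + 2) * (temp%ny + 2) * 8
    call mpi_file_write_at_all(fp, disp, temp%data, &
         & (temp%nx + 2) * (temp%ny + 2), MPI_DOUBLE_PRECISION, &
         & MPI_STATUS_IGNORE, ierr)

    call mpi_file_close(fp, ierr)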
end subroutine write_restart
  ! Read a restart checkpoint that contains the field dimensions, the
  ! current iteration number and the temperature field.
  subroutine read_restart(temp, parallel, iter)
    implicit none

    type(field), intent(inout) :: temp
    type(parallel_data), intent(inout) :: parallel
    integer, intent(out) :: iter

    integer :: rows, cols

    ! TODO: open the checkpoint file CHECKPOINT
    ! TODO: all ranks read the header using collective I/O
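    ! One possible sketch of the two steps above (same assumptions as in
    ! write_restart: file named 'HEAT_RESTART.dat', three 4-byte header
    ! integers, 8-byte dp values)
    integer :: fp, ierr
    integer, dimension(3) :: header
    integer(kind=MPI_OFFSET_KIND) :: disp

    call mpi_file_open(MPI_COMM_WORLD, 'HEAT_RESTART.dat', &
         & MPI_MODE_RDONLY, MPI_INFO_NULL, fp, ierr)
    ! Every rank needs the dimensions, so all read the header collectively
    call mpi_file_read_at_all(fp, 0_MPI_OFFSET_KIND, header, 3, &
         & MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
    rows = header(1)
    cols = header(2)
    iter = header(3)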
    ! Set the correct dimensions in the MPI metadata
    call parallel_setup(parallel, rows, cols)
    ! Set the local dimensions and allocate memory for the data
    call set_field_dimensions(temp, rows, cols, parallel)
    allocate(temp%data(0:temp%nx+1, 0:temp%ny+1))
    temp%data(:,:) = 0.0_dp
    ! TODO: all processes read their temperature data from the file,
    !       i.e. the temp%data array including the ghost layers.
    !       Use a collective read routine.
    ! TODO: close the file
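    ! Sketch continued: every rank reads its local block, ghost layers
    ! included, from the same per-rank offset that write_restart used
    disp = 3 * 4 + int(parallel%rank, MPI_OFFSET_KIND) * &
         & (temp%nx + 2) * (temp%ny + 2) * 8
    call mpi_file_read_at_all(fp, disp, temp%data, &
         & (temp%nx + 2) * (temp%ny + 2), MPI_DOUBLE_PRECISION, &
         & MPI_STATUS_IGNORE, ierr)
    call mpi_file_close(fp, ierr)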
  end subroutine read_restart

end module io