Skip to content

Commit 5da454b

Browse files
committed
add exo hdf5 per iter
1 parent 19e3241 commit 5da454b

File tree

6 files changed

+631
-0
lines changed

6 files changed

+631
-0
lines changed

hdf5_02/CMakeLists.txt

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
#=============================================================================
2+
# Copyright (C) 2015-2023 Commissariat a l'energie atomique et aux energies alternatives (CEA)
3+
#
4+
# Permission is hereby granted, free of charge, to any person obtaining a copy
5+
# of this software and associated documentation files (the "Software"), to deal
6+
# in the Software without restriction, including without limitation the rights
7+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8+
# copies of the Software, and to permit persons to whom the Software is
9+
# furnished to do so, subject to the following conditions:
10+
#
11+
# The above copyright notice and this permission notice shall be included in all
12+
# copies or substantial portions of the Software.
13+
#
14+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20+
# SOFTWARE.
21+
#=============================================================================
22+
23+
cmake_minimum_required(VERSION 3.16)
project(pdi_init LANGUAGES C)

# spdlog is presumably an indirect dependency of PDI's imported targets —
# TODO confirm; it is not linked directly, hence not REQUIRED.
find_package(spdlog)
find_package(MPI REQUIRED COMPONENTS C)
find_package(paraconf 1.0.0 REQUIRED COMPONENTS C)
find_package(PDI 1.9.0 REQUIRED COMPONENTS C)

# Build as C99, and make the standard mandatory rather than a soft preference.
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED ON)

add_executable(main main.c)
# PRIVATE: these are implementation dependencies of the executable; the
# keyword-less target_link_libraries signature has legacy semantics and
# must not be used.
target_link_libraries(main PRIVATE m MPI::MPI_C paraconf::paraconf PDI::pdi)

hdf5_02/config.yml

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
# Alpha coefficient of the heat-propagation stencil.
alpha: 0.125
# Global data-size, excluding the ghost layers used for boundary conditions.
global_size: { height: 60, width: 12 }
# Degree of parallelism: number of blocks in each dimension.
parallelism: { height: 2, width: 2 }

# PDI specification tree, passed to PDI_init by main.c.
pdi:
  metadata:
    local_size: {type: array, subtype: int, size: 2}
    rank: int
  data:
    iteration: int
    temp: {type: array, subtype: double, size: ['$local_size[0]', '$local_size[1]']}
  plugins:
    trace:

hdf5_02/main.c

Lines changed: 253 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,253 @@
1+
/*******************************************************************************
2+
* Copyright (C) 2015-2025 Commissariat a l'energie atomique et aux energies
3+
* alternatives (CEA)
4+
*
5+
* Permission is hereby granted, free of charge, to any person obtaining a copy
6+
* of this software and associated documentation files (the "Software"), to deal
7+
* in the Software without restriction, including without limitation the rights
8+
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9+
* copies of the Software, and to permit persons to whom the Software is
10+
* furnished to do so, subject to the following conditions:
11+
*
12+
* The above copyright notice and this permission notice shall be included in
13+
* all copies or substantial portions of the Software.
14+
*
15+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16+
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17+
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18+
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19+
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20+
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21+
* SOFTWARE.
22+
******************************************************************************/
23+
24+
#include <mpi.h>
25+
26+
#include <assert.h>
27+
#include <math.h>
28+
#include <stdio.h>
29+
#include <stdlib.h>
30+
#include <time.h>
31+
32+
#include <paraconf.h>
33+
#include <pdi.h>
34+
35+
// Size of the local data block as [HEIGHT, WIDTH], including the ghost
// layers used for communications or boundary conditions.
int dsize[2];

// 2D size of the process grid as [HEIGHT, WIDTH].
int psize[2];

// 2D rank of the local process in the process grid as [YY, XX].
int pcoord[2];

// Alpha coefficient used in the stencil computation.
double alpha;

// Physical size of the simulated domain.
double L = 1.0;

// Each heat source is a disk of uniform value, stored as
// {center_yy, center_xx, radius, value} — note the (YY, XX) order of the
// center coordinates is inverted with respect to the (XX, YY) notation below.
// source1: center=(0.4,0.4), radius=0.2 and value=100
double source1[4] = {0.4, 0.4, 0.2, 100};
// source2: center=(0.8,0.7), radius=0.1 and value=200
double source2[4] = {0.7, 0.8, 0.1, 200};
57+
/** Initialize all the data to 0, with the exception of each cells
58+
* whose center (cpos_x,cpos_y) is inside of the disks
59+
* defined by source1 or source2
60+
* \param[out] dat the local data to initialize
61+
*/
62+
void init(double dat[dsize[0]][dsize[1]]) {
63+
for (int yy = 0; yy < dsize[0]; ++yy)
64+
for (int xx = 0; xx < dsize[1]; ++xx)
65+
dat[yy][xx] = 0;
66+
double dy = L / ((dsize[0] - 2) * psize[0]);
67+
double dx = L / ((dsize[1] - 2) * psize[1]);
68+
69+
double cpos_x, cpos_y;
70+
double square_dist1, square_dist2;
71+
for (int yy = 0; yy < dsize[0]; ++yy) {
72+
cpos_y = (yy + pcoord[0] * (dsize[0] - 2)) * dy - 0.5 * dy;
73+
for (int xx = 0; xx < dsize[1]; ++xx) {
74+
cpos_x = (xx + pcoord[1] * (dsize[1] - 2)) * dx - 0.5 * dx;
75+
square_dist1 = (cpos_y - source1[0]) * (cpos_y - source1[0]) +
76+
(cpos_x - source1[1]) * (cpos_x - source1[1]);
77+
if (square_dist1 <= source1[2] * source1[2]) {
78+
dat[yy][xx] = source1[3];
79+
}
80+
square_dist2 = (cpos_y - source2[0]) * (cpos_y - source2[0]) +
81+
(cpos_x - source2[1]) * (cpos_x - source2[1]);
82+
if (square_dist2 <= source2[2] * source2[2]) {
83+
dat[yy][xx] = source2[3];
84+
}
85+
}
86+
}
87+
}
88+
89+
/** Compute the values at the next time-step based on the values at the current
90+
* time-step
91+
* \param[in] cur the local data at the current time-step
92+
* \param[out] next the local data at the next time-step
93+
*/
94+
void iter(double cur[dsize[0]][dsize[1]], double next[dsize[0]][dsize[1]]) {
95+
int xx, yy;
96+
for (yy = 1; yy < dsize[0] - 1; ++yy) {
97+
for (xx = 1; xx < dsize[1] - 1; ++xx) {
98+
next[yy][xx] = (1. - 4. * alpha) * cur[yy][xx] +
99+
alpha * (cur[yy][xx - 1] + cur[yy][xx + 1] +
100+
cur[yy - 1][xx] + cur[yy + 1][xx]);
101+
}
102+
}
103+
}
104+
105+
/** Exchange ghost values with neighbours
106+
* \param[in] cart_comm the MPI communicator with all processes organized in a
107+
* 2D Cartesian grid
108+
* \param[in] cur the local data at the current time-step
109+
* whose ghosts need exchanging
110+
*/
111+
void exchange(MPI_Comm cart_comm, double cur[dsize[0]][dsize[1]]) {
112+
MPI_Status status;
113+
int rank_source, rank_dest;
114+
static MPI_Datatype column, row;
115+
static int initialized = 0;
116+
117+
if (!initialized) {
118+
MPI_Type_vector(dsize[0] - 2, 1, dsize[1], MPI_DOUBLE, &column);
119+
MPI_Type_commit(&column);
120+
MPI_Type_contiguous(dsize[1] - 2, MPI_DOUBLE, &row);
121+
MPI_Type_commit(&row);
122+
initialized = 1;
123+
}
124+
125+
// send down
126+
MPI_Cart_shift(cart_comm, 0, 1, &rank_source, &rank_dest);
127+
MPI_Sendrecv(&cur[dsize[0] - 2][1], 1, row, rank_dest, 100, // send row before ghost
128+
&cur[0][1], 1, row, rank_source, 100, // receive 1st row (ghost)
129+
cart_comm, &status);
130+
131+
// send up
132+
MPI_Cart_shift(cart_comm, 0, -1, &rank_source, &rank_dest);
133+
MPI_Sendrecv(&cur[1][1], 1, row, rank_dest, 100, // send row after ghost
134+
&cur[dsize[0] - 1][1], 1, row, rank_source, 100, // receive last row (ghost)
135+
cart_comm, &status);
136+
137+
// send to the right
138+
MPI_Cart_shift(cart_comm, 1, 1, &rank_source, &rank_dest);
139+
MPI_Sendrecv(&cur[1][dsize[1] - 2], 1, column, rank_dest, 100, // send column before ghost
140+
&cur[1][0], 1, column, rank_source, 100, // receive 1st column (ghost)
141+
cart_comm, &status);
142+
143+
// send to the left
144+
MPI_Cart_shift(cart_comm, 1, -1, &rank_source, &rank_dest);
145+
MPI_Sendrecv(&cur[1][1], 1, column, rank_dest, 100, // send column after ghost
146+
&cur[1][dsize[1] - 1], 1, column, rank_source, 100, // receive last column (ghost)
147+
cart_comm, &status);
148+
}
149+
150+
int main(int argc, char *argv[]) {
151+
MPI_Init(&argc, &argv);
152+
153+
// load the configuration tree
154+
PC_tree_t conf = PC_parse_path("config.yml");
155+
PDI_init(PC_get(conf, ".pdi"));
156+
157+
// NEVER USE MPI_COMM_WORLD IN THE CODE, use our own communicator main_comm
158+
// instead
159+
MPI_Comm main_comm = MPI_COMM_WORLD;
160+
161+
// load the MPI rank & size
162+
int psize_1d;
163+
MPI_Comm_size(main_comm, &psize_1d);
164+
int pcoord_1d;
165+
MPI_Comm_rank(main_comm, &pcoord_1d);
166+
167+
long longval;
168+
169+
// load the alpha parameter
170+
PC_double(PC_get(conf, ".alpha"), &alpha);
171+
172+
int global_size[2];
173+
// load the global data-size
174+
// you can use paraconf to read some parameters from the yml config file
175+
PC_int(PC_get(conf, ".global_size.height"), &longval);
176+
global_size[0] = longval;
177+
PC_int(PC_get(conf, ".global_size.width"), &longval);
178+
global_size[1] = longval;
179+
180+
// load the parallelism configuration
181+
PC_int(PC_get(conf, ".parallelism.height"), &longval);
182+
psize[0] = longval;
183+
PC_int(PC_get(conf, ".parallelism.width"), &longval);
184+
psize[1] = longval;
185+
186+
// check the configuration is coherent
187+
assert(global_size[0] % psize[0] == 0);
188+
assert(global_size[1] % psize[1] == 0);
189+
assert(psize[1] * psize[0] == psize_1d);
190+
191+
// compute the local data-size (the number of ghost layers is 2 for each
192+
// coordinate)
193+
dsize[0] = global_size[0] / psize[0] + 2;
194+
dsize[1] = global_size[1] / psize[1] + 2;
195+
196+
PDI_expose("local_size", dsize, PDI_OUT);
197+
PDI_expose("rank", &pcoord_1d, PDI_OUT);
198+
199+
// create a 2D Cartesian MPI communicator & get our coordinate (rank) in it
200+
int cart_period[2] = {1, 1};
201+
MPI_Comm cart_comm;
202+
MPI_Cart_create(main_comm, 2, psize, cart_period, 1, &cart_comm);
203+
MPI_Cart_coords(cart_comm, pcoord_1d, 2, pcoord);
204+
205+
// allocate memory for the double buffered data
206+
double(*cur)[dsize[1]] = malloc(sizeof(double) * dsize[1] * dsize[0]);
207+
double(*next)[dsize[1]] = malloc(sizeof(double) * dsize[1] * dsize[0]);
208+
209+
// initialize the data content
210+
init(cur);
211+
212+
// our loop counter so as to be able to use it outside the loop
213+
int ii = 0;
214+
215+
// the main loop
216+
for (; ii < 10; ++ii) {
217+
218+
PDI_multi_expose("loop",
219+
"iteration", &ii, PDI_INOUT,
220+
"temp", cur, PDI_INOUT,
221+
NULL);
222+
223+
// compute the values for the next iteration
224+
iter(cur, next);
225+
226+
// exchange data with the neighbours
227+
exchange(cart_comm, next);
228+
229+
// swap the current and next values
230+
double(*tmp)[dsize[1]] = cur;
231+
cur = next;
232+
next = tmp;
233+
}
234+
235+
PDI_multi_expose("loop",
236+
"iteration", &ii, PDI_INOUT,
237+
"temp", cur, PDI_INOUT,
238+
NULL);
239+
240+
PDI_finalize();
241+
// destroy the paraconf configuration tree
242+
PC_tree_destroy(&conf);
243+
244+
// free the allocated memory
245+
free(cur);
246+
free(next);
247+
248+
// finalize MPI
249+
MPI_Finalize();
250+
251+
fprintf(stderr, "[%d] SUCCESS\n", pcoord_1d);
252+
return EXIT_SUCCESS;
253+
}

hdf5_02/solution/CMakeLists.txt

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
#=============================================================================
2+
# Copyright (C) 2015-2023 Commissariat a l'energie atomique et aux energies alternatives (CEA)
3+
#
4+
# Permission is hereby granted, free of charge, to any person obtaining a copy
5+
# of this software and associated documentation files (the "Software"), to deal
6+
# in the Software without restriction, including without limitation the rights
7+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8+
# copies of the Software, and to permit persons to whom the Software is
9+
# furnished to do so, subject to the following conditions:
10+
#
11+
# The above copyright notice and this permission notice shall be included in all
12+
# copies or substantial portions of the Software.
13+
#
14+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20+
# SOFTWARE.
21+
#=============================================================================
22+
23+
cmake_minimum_required(VERSION 3.16)
project(pdi_init LANGUAGES C)

# spdlog is presumably an indirect dependency of PDI's imported targets —
# TODO confirm; it is not linked directly, hence not REQUIRED.
find_package(spdlog)
find_package(MPI REQUIRED COMPONENTS C)
find_package(paraconf 1.0.0 REQUIRED COMPONENTS C)
find_package(PDI 1.9.0 REQUIRED COMPONENTS C)

# Build as C99, and make the standard mandatory rather than a soft preference.
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED ON)

add_executable(main main.c)
# PRIVATE: these are implementation dependencies of the executable; the
# keyword-less target_link_libraries signature has legacy semantics and
# must not be used.
target_link_libraries(main PRIVATE m MPI::MPI_C paraconf::paraconf PDI::pdi)

hdf5_02/solution/config.yml

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
# Alpha coefficient of the heat-propagation stencil.
alpha: 0.125
# Global data-size, excluding the ghost layers used for boundary conditions.
global_size: { height: 60, width: 12 }
# Degree of parallelism: number of blocks in each dimension.
parallelism: { height: 2, width: 2 }

# PDI specification tree, passed to PDI_init by main.c.
pdi:
  metadata:
    local_size: {type: array, subtype: int, size: 2}
    rank: int
    pcoord: {type: array, subtype: int, size: 2}
    psize: {type: array, subtype: int, size: 2}
  data:
    iteration: int
    temp: {type: array, subtype: double, size: ['$local_size[0]', '$local_size[1]']}
  plugins:
    trace:
    mpi:
    decl_hdf5:
      # One HDF5 file per iteration, written collectively by all ranks.
      - file: output_iter${iteration:02}.h5
        communicator: $MPI_COMM_WORLD
        datasets:
          temp: {type: array, subtype: double, size: ['$psize[0]*($local_size[0]-2)', '$psize[1]*($local_size[1]-2)' ] }
        on_event: loop
        write:
          temp:
            memory_selection:
              size: ['$local_size[0]-2', '$local_size[1]-2' ] # number of elements to transfer in each dimension
              start: [1, 1]
            dataset_selection:
              size: ['$local_size[0]-2', '$local_size[1]-2' ] # number of elements to transfer in each dimension
              start: ['$pcoord[0]*($local_size[0]-2)', '$pcoord[1]*($local_size[1]-2)']

0 commit comments

Comments (0)