
Commit 18adc16

Merge pull request #7 from learning-process/01-update
Add content for 01-intro
2 parents c0e3635 + e5b6e1c commit 18adc16

1 file changed: +143 -0 lines changed

01-intro.tex

@@ -69,6 +69,35 @@
% Section
\section{Introduction to MPI}

\begin{frame}[fragile]{What is MPI?}
MPI (Message Passing Interface) is a standardized and portable message-passing system, designed to function on a variety of parallel computing architectures.

It is primarily used in high-performance computing (HPC) to allow processes to communicate with each other in a distributed-memory environment.
\end{frame}

\begin{frame}[fragile]{MPI: library vs standard}
\begin{table}[h!]
\begin{tabular}{| p{2.1cm} | p{4.2cm} | p{4.2cm} |}
\hline
\textbf{Aspect} & \textbf{MPI Standard} & \textbf{MPI Library} \\
\hline
\textbf{Definition} & A formal set of specifications & A concrete software implementation \\
\hline
\textbf{Purpose} & Defines the behavior of message-passing systems & Provides a runnable implementation of the standard \\
\hline
\textbf{Portability} & Platform-agnostic guidelines & Implementations may be platform-specific \\
\hline
\textbf{Performance} & No direct impact on performance & Optimized for different platforms and hardware \\
\hline
\textbf{Examples} & MPI-1, MPI-2, MPI-3, MPI-4 (specifications) & MPICH, Open MPI, Intel MPI \\
\hline
\end{tabular}
\caption{Key Differences Between MPI Standard and MPI Library}
\end{table}
\end{frame}

\section{"Hello, World" in MPI}

% "Hello, World" in MPI
\begin{frame}[fragile]{"Hello, World" in MPI}

@@ -103,6 +132,120 @@ \section{Introduction to MPI}

\end{frame}

\begin{frame}[fragile]{Compiling MPI application}
Linux: \\
\texttt{mpicc -o hello\_mpi hello\_mpi.c}

Windows: \\
\texttt{cl /I"C:\textbackslash{}Program Files (x86)\textbackslash{}Microsoft SDKs\textbackslash{}MPI\textbackslash{}Include" hello\_mpi.c /link /LIBPATH:"C:\textbackslash{}Program Files (x86)\textbackslash{}Microsoft SDKs\textbackslash{}MPI\textbackslash{}Lib\textbackslash{}x64" msmpi.lib}

\end{frame}

\begin{frame}[fragile]{Running MPI application}
Important! If you run the application directly (\texttt{./hello\_mpi}), you will not get the expected result: it starts as a single ordinary process instead of a set of communicating MPI processes.

Linux: \\
\texttt{mpiexec -n 4 ./hello\_mpi}

Windows: \\
\texttt{mpiexec -n 4 hello\_mpi.exe}
\end{frame}

\section{Brief API calls overview}

\begin{frame}[fragile]{MPI initialization: \texttt{MPI\_Init()}}
\texttt{int MPI\_Init(int *argc, char ***argv)}

It initializes the MPI environment and must be called before any other MPI function.

\lstset{style=CStyle, caption=Basic application written using MPI}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    // Initialize the MPI environment
    MPI_Init(&argc, &argv);

    ...

    return 0;
}
\end{lstlisting}

\end{frame}

\section{MPI data distribution}

\begin{frame}[fragile]{MPI data distribution example}
\lstset{style=CStyle, caption=MPI data distribution example}
\begin{lstlisting}
#include <mpi.h>
#include <stdio.h>
int main(int argc, char** argv) {
    // Initialize the MPI environment
    MPI_Init(&argc, &argv);

    // Get the number of processes
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Get the rank of the process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Define message
    int number;
    if (world_rank == 0) {
        // If we are rank 0, set number to -1 and send it to process 1
        number = -1;
        MPI_Send(&number, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
        printf("Process 0 sent number %d to process 1\n", number);
    } else if (world_rank == 1) {
        // If we are rank 1, receive the number from process 0
        MPI_Recv(&number, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Process 1 received number %d from process 0\n", number);
    }

    // Finalize the MPI environment
    MPI_Finalize();
    return 0;
}
\end{lstlisting}

\end{frame}

\begin{frame}[fragile]{\texttt{MPI\_Send()}}
\texttt{int MPI\_Send(const void *buf, int count, MPI\_Datatype datatype, int dest, int tag, MPI\_Comm comm)}

Parameters:

\begin{itemize}
    \item \texttt{buf}: The starting address of the data buffer to be sent.
    \item \texttt{count}: The number of elements in the buffer.
    \item \texttt{datatype}: The type of data being sent (e.g., \texttt{MPI\_INT}, \texttt{MPI\_FLOAT}).
    \item \texttt{dest}: The rank (ID) of the destination process.
    \item \texttt{tag}: A user-defined message identifier to differentiate messages.
    \item \texttt{comm}: The communicator that defines the group of processes within which the message is being sent (e.g., \texttt{MPI\_COMM\_WORLD}).
\end{itemize}
\end{frame}
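
\begin{frame}[fragile]{\texttt{MPI\_Send()}: usage sketch}
A minimal sketch of a typical call, assuming \texttt{world\_rank} was obtained via \texttt{MPI\_Comm\_rank} and at least two processes are running; the buffer name and tag value are illustrative:

\lstset{style=CStyle, caption={Illustrative MPI\_Send call}}
\begin{lstlisting}
// Illustrative sketch: assumes world_rank was set via MPI_Comm_rank
double values[3] = {1.0, 2.0, 3.0};
if (world_rank == 0) {
    // Send three doubles to rank 1, using tag 42
    MPI_Send(values, 3, MPI_DOUBLE, 1, 42, MPI_COMM_WORLD);
}
\end{lstlisting}
\end{frame}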

\begin{frame}[fragile]{\texttt{MPI\_Recv()}}
\texttt{int MPI\_Recv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Status *status)}

Parameters:

\begin{itemize}
    \item \texttt{buf}: The starting address of the buffer where the received data will be stored.
    \item \texttt{count}: The maximum number of elements that the buffer can hold.
    \item \texttt{datatype}: The type of data being received (e.g., \texttt{MPI\_INT}, \texttt{MPI\_FLOAT}).
    \item \texttt{source}: The rank of the sending process. Use \texttt{MPI\_ANY\_SOURCE} to receive from any process.
    \item \texttt{tag}: The message identifier (tag). Use \texttt{MPI\_ANY\_TAG} to receive any message regardless of the tag.
    \item \texttt{comm}: The communicator for the group of processes within which the message is being received (e.g., \texttt{MPI\_COMM\_WORLD}).
    \item \texttt{status}: A structure that contains information about the received message, such as the actual source and tag.
\end{itemize}
\end{frame}
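
\begin{frame}[fragile]{\texttt{MPI\_Recv()}: reading \texttt{MPI\_Status}}
A minimal sketch of inspecting the \texttt{status} structure after a receive; the buffer size and the use of the wildcards \texttt{MPI\_ANY\_SOURCE} and \texttt{MPI\_ANY\_TAG} are illustrative assumptions:

\lstset{style=CStyle, caption={Illustrative use of MPI\_Status}}
\begin{lstlisting}
// Illustrative sketch: buffer size and wildcards chosen for the example
int buffer[16];
MPI_Status status;
// Accept a message from any source with any tag
MPI_Recv(buffer, 16, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,
         MPI_COMM_WORLD, &status);

// The status structure records the actual source and tag;
// MPI_Get_count reports how many elements were received
int count;
MPI_Get_count(&status, MPI_INT, &count);
printf("Received %d ints from rank %d (tag %d)\n",
       count, status.MPI_SOURCE, status.MPI_TAG);
\end{lstlisting}
\end{frame}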

\begin{frame}[fragile]{Performance measurement in MPI: \texttt{MPI\_Wtime()}}
\texttt{double MPI\_Wtime(void)}
