openfpm / openfpm_vcluster / Commits / 26e5bb68

Commit 26e5bb68, authored Jul 08, 2015 by incardon
Refactoring

parent c9fae2a0
Showing 13 changed files with 2865 additions and 0 deletions (+2865 -0)
src/MPI_wrapper/MPI_IallreduceW.hpp   +224  -0
src/MPI_wrapper/MPI_IrecvW.hpp        +151  -0
src/MPI_wrapper/MPI_IsendW.hpp        +150  -0
src/MPI_wrapper/MPI_util.hpp          +40   -0
src/unused/ComUnit.cpp                +128  -0
src/unused/ComUnit.hpp                +44   -0
src/unused/TThread.cpp                +244  -0
src/unused/TThread.h                  +204  -0
src/unused/TThreadPool.cpp            +418  -0
src/unused/TThreadPool.h              +144  -0
src/unused/ThreadWorker.cpp           +498  -0
src/unused/ThreadWorker.h             +252  -0
src/unused/sll.h                      +368  -0
src/MPI_wrapper/MPI_IallreduceW.hpp  (new file, mode 100644)
#ifndef MPI_IALLREDUCEW_HPP
#define MPI_IALLREDUCEW_HPP

#include <mpi.h>
#include <iostream>      // std::cerr in the generic fallback
#include <typeinfo>      // typeid in the generic fallback
#include "MPI_util.hpp"  // MPI_SAFE_CALL

/*! \brief Set of wrapper classes for MPI_Iallreduce
 *
 * The purpose of these classes is to correctly choose the right call based on the type we want to reduce
 *
 */

/*! \brief General reduction
 *
 * \tparam T any type
 *
 */
template<typename T> class MPI_IallreduceW
{
public:
	static inline void reduce(T & buf, MPI_Op op, MPI_Request & req)
	{
		// no specialization exists for this type: report it instead of calling MPI
		std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
	}
};

/*! \brief specialization for integer */
template<> class MPI_IallreduceW<int>
{
public:
	static inline void reduce(int & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_INT, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for unsigned integer */
template<> class MPI_IallreduceW<unsigned int>
{
public:
	static inline void reduce(unsigned int & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_UNSIGNED, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for short */
template<> class MPI_IallreduceW<short>
{
public:
	static inline void reduce(short & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_SHORT, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for unsigned short */
template<> class MPI_IallreduceW<unsigned short>
{
public:
	static inline void reduce(unsigned short & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_UNSIGNED_SHORT, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for char */
template<> class MPI_IallreduceW<char>
{
public:
	static inline void reduce(char & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_CHAR, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for unsigned char */
template<> class MPI_IallreduceW<unsigned char>
{
public:
	static inline void reduce(unsigned char & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_UNSIGNED_CHAR, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for size_t */
template<> class MPI_IallreduceW<size_t>
{
public:
	static inline void reduce(size_t & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for long int */
template<> class MPI_IallreduceW<long int>
{
public:
	static inline void reduce(long int & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_LONG, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for float */
template<> class MPI_IallreduceW<float>
{
public:
	static inline void reduce(float & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_FLOAT, op, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for double */
template<> class MPI_IallreduceW<double>
{
public:
	static inline void reduce(double & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, op, MPI_COMM_WORLD, &req));
	}
};

////////////////// Specialization for vectors ///////////////

/*! \brief specialization for vector integer */
/*template<> class MPI_IallreduceW<openfpm::vector<int>>
{
public:
	static inline void reduce(openfpm::vector<int> & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(), MPI_INT, op, MPI_COMM_WORLD, &req);
	}
};*/

/*! \brief specialization for vector short */
/*template<> class MPI_IallreduceW<openfpm::vector<short>>
{
public:
	static inline void reduce(openfpm::vector<short> & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(), MPI_SHORT, op, MPI_COMM_WORLD, &req);
	}
};*/

/*! \brief specialization for vector char */
/*template<> class MPI_IallreduceW<openfpm::vector<char>>
{
public:
	static inline void reduce(openfpm::vector<char> & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(), MPI_CHAR, op, MPI_COMM_WORLD, &req);
	}
};*/

/*! \brief specialization for vector size_t */
/*template<> class MPI_IallreduceW<openfpm::vector<size_t>>
{
public:
	static inline void reduce(openfpm::vector<size_t> & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(), MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD, &req);
	}
};*/

/*! \brief specialization for vector float */
/*template<> class MPI_IallreduceW<openfpm::vector<float>>
{
public:
	static inline void reduce(openfpm::vector<float> & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(), MPI_FLOAT, op, MPI_COMM_WORLD, &req);
	}
};*/

/*! \brief specialization for vector double */
/*template<> class MPI_IallreduceW<openfpm::vector<double>>
{
public:
	static inline void reduce(openfpm::vector<double> & buf, MPI_Op op, MPI_Request & req)
	{
		MPI_Iallreduce(MPI_IN_PLACE, &buf.get(0), buf.size(), MPI_DOUBLE, op, MPI_COMM_WORLD, &req);
	}
};*/

#endif
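For orientation, here is a minimal sketch of how a wrapper such as MPI_IallreduceW is meant to be driven. The driver program below is not part of this commit (the real caller is the VCluster layer), and the include paths are assumptions based on the file locations above.

// Hypothetical driver: start a non-blocking, in-place sum across all ranks,
// then wait explicitly on the request the wrapper filled in.
#include <mpi.h>
#include "MPI_wrapper/MPI_util.hpp"        // MPI_SAFE_CALL (path assumed)
#include "MPI_wrapper/MPI_IallreduceW.hpp" // wrapper under discussion (path assumed)

int main(int argc, char ** argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	// each rank contributes its id; the reduction result lands in-place in val
	int val = rank;
	MPI_Request req;
	MPI_IallreduceW<int>::reduce(val, MPI_SUM, req);

	// the wrapper only starts the operation; completion is the caller's job
	MPI_Wait(&req, MPI_STATUS_IGNORE);

	MPI_Finalize();
	return 0;
}

Since the wrappers call MPI_Iallreduce, an MPI-3 capable implementation is required.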
src/MPI_wrapper/MPI_IrecvW.hpp  (new file, mode 100644)
#ifndef MPI_IRECV_HPP
#define MPI_IRECV_HPP

#include <mpi.h>
#include "MPI_util.hpp"  // MPI_SAFE_CALL
// Note: openfpm::vector must be declared before this header is included

/*! \brief Set of wrapper classes for MPI_Irecv
 *
 * The purpose of these classes is to correctly choose the right call based on the type we want to receive
 *
 */

/*! \brief General receive
 *
 * \tparam T any type
 *
 */
template<typename T> class MPI_IrecvW
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<T> & v, MPI_Request & req)
	{
		// unknown element type: receive the raw bytes of the vector storage
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T), MPI_BYTE, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of integer */
template<> class MPI_IrecvW<int>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_INT, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of unsigned integer */
template<> class MPI_IrecvW<unsigned int>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<unsigned int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_UNSIGNED, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of short */
template<> class MPI_IrecvW<short>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<short> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_SHORT, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of unsigned short */
template<> class MPI_IrecvW<unsigned short>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<unsigned short> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_UNSIGNED_SHORT, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of char */
template<> class MPI_IrecvW<char>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<char> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_CHAR, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of unsigned char */
template<> class MPI_IrecvW<unsigned char>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<unsigned char> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_UNSIGNED_CHAR, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of size_t */
template<> class MPI_IrecvW<size_t>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<size_t> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_UNSIGNED_LONG, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of long int */
template<> class MPI_IrecvW<long int>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<long int> & v, MPI_Request & req)
	{
		// wrapped in MPI_SAFE_CALL for consistency with the other specializations
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_LONG, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of float */
template<> class MPI_IrecvW<float>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<float> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_FLOAT, proc, tag, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for vector of double */
template<> class MPI_IrecvW<double>
{
public:
	static inline void recv(size_t proc, size_t tag, openfpm::vector<double> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size(), MPI_DOUBLE, proc, tag, MPI_COMM_WORLD, &req));
	}
};

#endif
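A short, hedged sketch of how a receive would be posted through MPI_IrecvW. The element-typed specializations take the message count from v.size(), so the buffer must already be sized to the expected number of elements; the resize() call below is an assumption about openfpm::vector (only getPointer() and size() appear in this commit), and the header providing openfpm::vector is omitted.

// Hypothetical fragment: post a non-blocking receive of 100 ints from rank 0
// with tag 42, then complete it later with MPI_Wait.
openfpm::vector<int> buf;
buf.resize(100);               // assumed openfpm::vector API, not shown in this commit

MPI_Request req;
MPI_IrecvW<int>::recv(0 /*proc*/, 42 /*tag*/, buf, req);

// ... overlap other work here ...
MPI_Wait(&req, MPI_STATUS_IGNORE);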
src/MPI_wrapper/MPI_IsendW.hpp  (new file, mode 100644)
#ifndef MPI_ISEND_HPP
#define MPI_ISEND_HPP

#include <mpi.h>
// Note: openfpm::vector must be declared before this header is included

/*! \brief Set of wrapper classes for MPI_Isend
 *
 * The purpose of these classes is to correctly choose the right call based on the type we want to send
 *
 */

/*! \brief General send
 *
 * \tparam T any type
 *
 */
template<typename T> class MPI_IsendW
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<T> & v, MPI_Request & req)
	{
		// unknown element type: send the raw bytes of the vector storage
		MPI_Isend(v.getPointer(), v.size() * sizeof(T), MPI_BYTE, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of integer */
template<> class MPI_IsendW<int>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<int> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_INT, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of unsigned integer */
template<> class MPI_IsendW<unsigned int>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<unsigned int> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_UNSIGNED, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of short */
template<> class MPI_IsendW<short>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<short> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_SHORT, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of unsigned short */
template<> class MPI_IsendW<unsigned short>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<unsigned short> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_UNSIGNED_SHORT, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of char */
template<> class MPI_IsendW<char>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<char> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_CHAR, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of unsigned char */
template<> class MPI_IsendW<unsigned char>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<unsigned char> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_UNSIGNED_CHAR, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of size_t */
template<> class MPI_IsendW<size_t>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<size_t> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_UNSIGNED_LONG, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of long int */
template<> class MPI_IsendW<long int>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<long int> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_LONG, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of float */
template<> class MPI_IsendW<float>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<float> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_FLOAT, proc, tag, MPI_COMM_WORLD, &req);
	}
};

/*! \brief specialization for vector of double */
template<> class MPI_IsendW<double>
{
public:
	static inline void send(size_t proc, size_t tag, openfpm::vector<double> & v, MPI_Request & req)
	{
		MPI_Isend(v.getPointer(), v.size(), MPI_DOUBLE, proc, tag, MPI_COMM_WORLD, &req);
	}
};

#endif
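The send wrappers mirror the receive wrappers, so a matched pair looks roughly like the hypothetical sketch below (a one-way exchange from rank 0 to rank 1; the resize() call is again an assumed openfpm::vector member). Note that, unlike MPI_IrecvW, these send specializations do not route through MPI_SAFE_CALL.

// Hypothetical matched pair: rank 0 sends 64 doubles to rank 1 with tag 7.
// Run with at least two ranks.
openfpm::vector<double> data;
data.resize(64);               // assumed openfpm::vector API

int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);

MPI_Request req;
if (rank == 0)
	MPI_IsendW<double>::send(1 /*proc*/, 7 /*tag*/, data, req);
else if (rank == 1)
	MPI_IrecvW<double>::recv(0 /*proc*/, 7 /*tag*/, data, req);

// both sides must keep the buffer alive until the request completes
if (rank < 2)
	MPI_Wait(&req, MPI_STATUS_IGNORE);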
src/MPI_wrapper/MPI_util.hpp  (new file, mode 100644)
/*
 * MPI_util.hpp
 *
 *  Created on: Jul 7, 2015
 *      Author: Pietro Incardona
 */

#ifndef MPI_UTIL_HPP_
#define MPI_UTIL_HPP_

#include <mpi.h>
#include <iostream>  // std::cerr
#include <cstdio>    // BUFSIZ

/*! \brief From an MPI error code it prints a human readable message
 *
 * \param error_code
 *
 */
static void error_handler(int error_code)
{
	int rank;
	char error_string[BUFSIZ];
	int length_of_error_string, error_class;

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	// print the error class first ...
	MPI_Error_class(error_code, &error_class);
	MPI_Error_string(error_class, error_string, &length_of_error_string);
	std::cerr << rank << ": " << error_string;

	// ... then the specific error string
	MPI_Error_string(error_code, error_string, &length_of_error_string);
	std::cerr << rank << ": " << error_string;
}

#define MPI_SAFE_CALL(call) {\
	int err = call;\
	if (MPI_SUCCESS != err) {\
		std::cerr << "MPI error: "<< __FILE__ << " " << __LINE__ << "\n";\
		error_handler(err);\
	}\
}

#endif /* MPI_UTIL_HPP_ */
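A minimal sketch of the intended use of MPI_SAFE_CALL: wrap any MPI call that returns an error code, and on failure the macro prints the file and line of the call site, then delegates to error_handler() for the decoded error class and message. The MPI_Barrier below is just a placeholder call, and the include path is assumed.

#include <mpi.h>
#include "MPI_wrapper/MPI_util.hpp"   // path assumed

void checked_barrier()
{
	// expands to: run the call, compare the result against MPI_SUCCESS,
	// and report via error_handler() on failure
	MPI_SAFE_CALL(MPI_Barrier(MPI_COMM_WORLD));
}

Keep in mind that the default error handler on MPI_COMM_WORLD aborts the job on error; the macro only gets a chance to report if the communicator's handler is switched to MPI_ERRORS_RETURN.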
src/unused/ComUnit.cpp  (new file, mode 100644)
#include "ComUnit.hpp"
#define SERVICE_TAG 0xFFFFFFF
/*! \brief Send some data globally to one processor when the other side
* do not know about the other side
*
* Send some data globally to one processor when the other side
* do not know about the other side
*
* \Warning if you already call this function with p, will overwrite the request
*
* \param p is the processor number
* \param buf is the buffer pointer
* \param sz is the size of the communication
*
*/
bool
SentToU
(
size_t
p
,
void
*
buf
,
size_t
sz
)
{
// before complete the communication we have to notify to the other
// processor that we have some data to send.
if
(
p
>=
comReq
.
size
())
{
std
::
cerr
<<
"Error: file: "
<<
__FILE__
<<
" line: "
<<
__LINE__
<<
" processor "
<<
p
<<
" does not exist"
;
return
false
;
}
return
true
;
}
/*! \brief Send some data locally (to its neighborhood) to one processor
*
* Send some data locally to one processor
*
*/
bool
SendToNU
(
void
*
buf
,
size_t
sz
)
{
return
true
;
}
/*! \brief Send some data globally to one processor when the other side
* know about the other side
*
* Send some data globally to one processor when the other side
* know about the other side
*
* \Warning if you already call this function with p, will overwrite the request
*
* \param p is the processor number
* \param buf is the buffer pointer
* \param sz is the size of the communication
*
*/
bool
SendTo
(
size_t
p
,
void
*
buf
,
size_t
sz
)
{
MPI_ISend
(
p
,
buf
,
sz
);
}
/*! \brief Wait for all communication to complete
*
* Wait for all communication to complete
*
* \return true if no error occur
*
*/
bool
wait
()
{