openfpm / openfpm_vcluster
Compare Revisions: master...insitu_visualization (source: insitu_visualization, target: master)
Commits (5)

09815c24  Fixing in situ visualization              (incardon, Dec 05, 2018)
8e5ec9fc  Fixing insitu when PETSC not available    (incardon, Dec 05, 2018)
05bd067e  insitu now is merged with 2.0.0           (incardon, Apr 17, 2019)
9f274001  Added shared memory support               (incardon, Apr 18, 2019)
8b660cc6  Adding shared memory files                (incardon, Aug 13, 2019)
Showing 12 changed files with 460 additions and 366 deletions (+460 -366)

INSTALL                                     +160  -162
src/CMakeLists.txt                          +1    -1
src/MPI_wrapper/MPI_IAllGather.hpp          +24   -24
src/MPI_wrapper/MPI_IBcastW.hpp             +38   -32
src/MPI_wrapper/MPI_IallreduceW.hpp         +33   -33
src/MPI_wrapper/MPI_IrecvW.hpp              +24   -24
src/MPI_wrapper/MPI_IsendW.hpp              +24   -24
src/VCluster/VCluster.cpp                   +2    -0
src/VCluster/VCluster.hpp                   +103  -38
src/VCluster/VCluster_base.hpp              +22   -19
src/VCluster/VCluster_unit_test_util.hpp    +2    -6
src/VCluster/VCluster_unit_tests.cpp        +27   -3
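Taken together, the diffs below thread an explicit MPI communicator (ext_comm) through the MPI wrappers and through Vcluster, so that OpenFPM can run on a communicator other than MPI_COMM_WORLD; this is what the in-situ visualization split needs. As orientation, here is a minimal sketch of how the new entry point introduced in src/VCluster/VCluster.hpp might be called. openfpm_init, init_options::in_situ_visualization, create_vcluster and openfpm_finalize come from the diffs below; the surrounding main() and the simulation placeholder are illustrative only.

#include "VCluster/VCluster.hpp"   // header path as used inside this repository

int main(int argc, char* argv[])
{
	// Ask for the in-situ visualization split: per the VCluster.hpp diff,
	// rank 0 of MPI_COMM_WORLD becomes the visualization node and the
	// remaining ranks form the compute communicator used by Vcluster.
	openfpm_init(&argc, &argv, init_options::in_situ_visualization);

	Vcluster<> & v_cl = create_vcluster();

	// ... hypothetical simulation work on the compute ranks ...

	openfpm_finalize();
	return 0;
}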
INSTALL  (view file @ 8b660cc6)
This diff is collapsed.
src/CMakeLists.txt  (view file @ 8b660cc6)

@@ -6,7 +6,7 @@ if (CUDA_FOUND)
 	set(CUDA_SOURCES ../../openfpm_devices/src/memory/CudaMemory.cu VCluster/cuda/VCluster_semantic_unit_cuda_tests.cu VCluster/cuda/VCluster_unit_tests.cu)
 endif()

-add_executable(vcluster_test main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})
+add_executable(vcluster_test main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/ShmAllocator_manager.cpp ../../openfpm_devices/src/memory/SemManager.cpp ../../openfpm_devices/src/memory/ShmAllocator.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp VCluster/VCluster_unit_tests.cpp VCluster/VCluster_semantic_unit_tests.cpp ${CUDA_SOURCES})

 if(CMAKE_COMPILER_IS_GNUCC)
 	target_compile_options(vcluster_test PRIVATE "-Wno-deprecated-declarations")
src/MPI_wrapper/MPI_IAllGather.hpp  (view file @ 8b660cc6)

@@ -28,9 +28,9 @@
 class MPI_IAllGatherWB
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s, void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s, void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE,rbuf,sz_r,MPI_BYTE,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sz_s,MPI_BYTE,rbuf,sz_r,MPI_BYTE,ext_comm,&req));
 	}
 };

@@ -44,9 +44,9 @@ template<typename T> class MPI_IAllGatherW
 {
 public:
-	static inline void gather(void * sbuf, size_t sz_s, void * rbuf, size_t sz_r, MPI_Request & req)
+	static inline void gather(void * sbuf, size_t sz_s, void * rbuf, size_t sz_r, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T)*sz_s,MPI_BYTE,rbuf,sz_r*sizeof(T),MPI_BYTE,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallgather(sbuf,sizeof(T)*sz_s,MPI_BYTE,rbuf,sz_r*sizeof(T),MPI_BYTE,ext_comm,&req));
 	}
 };

Every MPI_IAllGatherW specialization receives the same change, each with its native MPI datatype: int (MPI_INT, @@ -57), unsigned int (MPI_UNSIGNED, @@ -69), short (MPI_SHORT, @@ -81), unsigned short (MPI_UNSIGNED_SHORT, @@ -94), char (MPI_CHAR, @@ -107), unsigned char (MPI_UNSIGNED_CHAR, @@ -120), size_t (MPI_UNSIGNED_LONG, @@ -132), long int (MPI_LONG, @@ -144), float (MPI_FLOAT, @@ -156) and double (MPI_DOUBLE, @@ -168). In each case gather() gains a trailing MPI_Comm ext_comm parameter and MPI_Iallgather is issued on ext_comm instead of MPI_COMM_WORLD.
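As a usage note (not part of the diff): with the new signature the caller chooses the communicator explicitly. A hedged sketch, assuming a valid subcommunicator comm is available; the buffer sizes and the value being gathered are illustrative.

// Sketch: non-blocking all-gather of one double per rank on an arbitrary communicator.
#include <mpi.h>
#include <vector>
#include "MPI_wrapper/MPI_IAllGather.hpp"

void gather_example(MPI_Comm comm)
{
	int nproc;
	MPI_Comm_size(comm, &nproc);

	double local = 3.14;                 // value contributed by this rank (illustrative)
	std::vector<double> all(nproc);      // one slot per rank
	MPI_Request req;

	// New signature: the communicator is now an explicit trailing argument.
	MPI_IAllGatherW<double>::gather(&local, 1, all.data(), 1, req, comm);
	MPI_Wait(&req, MPI_STATUS_IGNORE);
}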
src/MPI_wrapper/MPI_IBcastW.hpp  (view file @ 8b660cc6)

@@ -33,9 +33,9 @@
 class MPI_IBcastWB
 {
 public:
-	static inline void bcast(size_t proc, void * buf, size_t sz, MPI_Request & req)
+	static inline void bcast(size_t proc, void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE,proc,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE,proc,ext_comm,&req));
 	}
 };

@@ -48,9 +48,9 @@ public:
 template<typename T> class MPI_IBcastW
 {
 public:
-	template<typename Memory> static inline void bcast(size_t proc, openfpm::vector<T,Memory> & v, MPI_Request & req)
+	template<typename Memory> static inline void bcast(size_t proc, openfpm::vector<T,Memory> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,ext_comm,&req));
 	}
 };

The MPI_IBcastW specializations for int (MPI_INT, @@ -61), unsigned int (MPI_UNSIGNED, @@ -73), short (MPI_SHORT, @@ -85), unsigned short (MPI_UNSIGNED_SHORT, @@ -97), char (MPI_CHAR, @@ -109), unsigned char (MPI_UNSIGNED_CHAR, @@ -121), size_t (MPI_UNSIGNED_LONG, @@ -133), long int (MPI_LONG, @@ -145), float (MPI_FLOAT, @@ -157) and double (MPI_DOUBLE, @@ -169) receive the same change: bcast() gains a trailing MPI_Comm ext_comm parameter and MPI_Ibcast runs on ext_comm instead of MPI_COMM_WORLD.

@@ -195,15 +195,19 @@ struct bcast_inte_impl
 	//! root processor
 	size_t root;

+	//! MPI communicator
+	MPI_Comm ext_comm;
+
 	/*! \brief constructor
 	 *
 	 * \param v set of pointer buffers to set
 	 *
 	 */
-	inline bcast_inte_impl(vect & send, openfpm::vector<MPI_Request> & req, size_t root)
-	:send(send),req(req),root(root)
+	inline bcast_inte_impl(vect & send, openfpm::vector<MPI_Request> & req, size_t root, MPI_Comm ext_comm)
+	:send(send),req(req),root(root),ext_comm(ext_comm)
 	{};

 	//! It call the copy function for each property

@@ -216,7 +220,7 @@ struct bcast_inte_impl
 		req.add();

 		// gather
-		MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last());
+		MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last(),ext_comm);
 	}
 };

@@ -226,13 +230,14 @@ struct b_cast_helper
 	template<typename T, typename Mem, typename lt_type, template<typename> class layout_base>
 	static void bcast_(openfpm::vector<MPI_Request> & req,
 	                   openfpm::vector<T,Mem,lt_type,layout_base> & v,
-	                   size_t root)
+	                   size_t root,
+	                   MPI_Comm ext_comm)
 	{
 		// Create one request
 		req.add();

 		// gather
-		MPI_IBcastW<T>::bcast(root,v,req.last());
+		MPI_IBcastW<T>::bcast(root,v,req.last(),ext_comm);
 	}
 };

@@ -242,9 +247,10 @@ struct b_cast_helper<false>
 	template<typename T, typename Mem, typename lt_type, template<typename> class layout_base>
 	static void bcast_(openfpm::vector<MPI_Request> & req,
 	                   openfpm::vector<T,Mem,lt_type,layout_base> & v,
-	                   size_t root)
+	                   size_t root,
+	                   MPI_Comm ext_comm)
 	{
-		bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root);
+		bcast_inte_impl<openfpm::vector<T,Mem,lt_type,layout_base>> bc(v,req,root,ext_comm);

 		boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
 	}
src/MPI_wrapper/MPI_IallreduceW.hpp  (view file @ 8b660cc6)

@@ -19,7 +19,7 @@
 template<typename T> class MPI_IallreduceW
 {
 public:
-	static inline void reduce(T & buf, MPI_Op op, MPI_Request & req)
+	static inline void reduce(T & buf, MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
 		std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " cannot recognize " << typeid(T).name() << "\n";
 	}

@@ -32,9 +32,9 @@ public:
 template<> class MPI_IallreduceW<int>
 {
 public:
-	static inline void reduce(int & buf, MPI_Op op, MPI_Request & req)
+	static inline void reduce(int & buf, MPI_Op op, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE,&buf,1,MPI_INT,op,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Iallreduce(MPI_IN_PLACE,&buf,1,MPI_INT,op,ext_comm,&req));
 	}
 };

The remaining specializations follow the same pattern, each with its own MPI datatype: unsigned int (MPI_UNSIGNED, @@ -44), short (MPI_SHORT, @@ -56), unsigned short (MPI_UNSIGNED_SHORT, @@ -68), char (MPI_CHAR, @@ -80), unsigned char (MPI_UNSIGNED_CHAR, @@ -92), size_t (MPI_UNSIGNED_LONG, @@ -104), long int (MPI_LONG, @@ -116), float (MPI_FLOAT, @@ -128) and double (MPI_DOUBLE, @@ -140): reduce() gains a trailing MPI_Comm ext_comm parameter and MPI_Iallreduce runs on ext_comm instead of MPI_COMM_WORLD. The commented-out specializations for openfpm::vector<int>, <short>, <char>, <size_t>, <float> and <double> (@@ -154, -166, -178, -190, -202, -215) are updated the same way inside the comments.
src/MPI_wrapper/MPI_IrecvW.hpp  (view file @ 8b660cc6)

@@ -22,9 +22,9 @@ public:
 	 * \param req MPI request
 	 *
 	 */
-	static inline void recv(size_t proc, size_t tag, void * buf, size_t sz, MPI_Request & req)
+	static inline void recv(size_t proc, size_t tag, void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE,proc,tag,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(buf,sz,MPI_BYTE,proc,tag,ext_comm,&req));
 	}
 };

@@ -37,9 +37,9 @@ public:
 template<typename T> class MPI_IrecvW
 {
 public:
-	static inline void recv(size_t proc, size_t tag, openfpm::vector<T> & v, MPI_Request & req)
+	static inline void recv(size_t proc, size_t tag, openfpm::vector<T> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,tag,MPI_COMM_WORLD,&req));
+		MPI_SAFE_CALL(MPI_Irecv(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,tag,ext_comm,&req));
 	}
 };

The MPI_IrecvW specializations for int (MPI_INT, @@ -50), unsigned int (MPI_UNSIGNED, @@ -62), short (MPI_SHORT, @@ -74), unsigned short (MPI_UNSIGNED_SHORT, @@ -86), char (MPI_CHAR, @@ -98), unsigned char (MPI_UNSIGNED_CHAR, @@ -110), size_t (MPI_UNSIGNED_LONG, @@ -122), long int (MPI_LONG, @@ -134), float (MPI_FLOAT, @@ -146) and double (MPI_DOUBLE, @@ -158) receive the same change: recv() gains a trailing MPI_Comm ext_comm parameter and MPI_Irecv runs on ext_comm instead of MPI_COMM_WORLD.
src/MPI_wrapper/MPI_IsendW.hpp  (view file @ 8b660cc6)

@@ -16,9 +16,9 @@
 class MPI_IsendWB
 {
 public:
-	static inline void send(size_t proc, size_t tag, const void * buf, size_t sz, MPI_Request & req)
+	static inline void send(size_t proc, size_t tag, const void * buf, size_t sz, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(buf,sz,MPI_BYTE,proc,tag,MPI_COMM_WORLD,&req);
+		MPI_Isend(buf,sz,MPI_BYTE,proc,tag,ext_comm,&req);
 	}
 };

@@ -31,9 +31,9 @@ public:
 template<typename T, typename Mem, typename gr> class MPI_IsendW
 {
 public:
-	static inline void send(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v, MPI_Request & req)
+	static inline void send(size_t proc, size_t tag, openfpm::vector<T,Mem,gr> & v, MPI_Request & req, MPI_Comm ext_comm)
 	{
-		MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,tag,MPI_COMM_WORLD,&req);
+		MPI_Isend(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,tag,ext_comm,&req);
 	}
 };

The partial specializations MPI_IsendW<int,Mem,gr> (MPI_INT, @@ -44), <unsigned int,Mem,gr> (MPI_UNSIGNED, @@ -56), <short,Mem,gr> (MPI_SHORT, @@ -68), <unsigned short,Mem,gr> (MPI_UNSIGNED_SHORT, @@ -80), <char,Mem,gr> (MPI_CHAR, @@ -92), <unsigned char,Mem,gr> (MPI_UNSIGNED_CHAR, @@ -104), <size_t,Mem,gr> (MPI_UNSIGNED_LONG, @@ -116), <long int,Mem,gr> (MPI_LONG, @@ -128), <float,Mem,gr> (MPI_FLOAT, @@ -140) and <double,Mem,gr> (MPI_DOUBLE, @@ -152) receive the same change: send() gains a trailing MPI_Comm ext_comm parameter and MPI_Isend runs on ext_comm instead of MPI_COMM_WORLD.
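For orientation (not part of the diff), a hedged sketch of driving the low-level send/receive wrappers directly on an arbitrary communicator; the ranks, tag and payload are made up, and OpenFPM code would normally go through Vcluster rather than call these wrappers by hand.

// Sketch: point-to-point exchange through MPI_IsendWB / MPI_IrecvWB on a given communicator.
#include <mpi.h>
#include "MPI_wrapper/MPI_IsendW.hpp"
#include "MPI_wrapper/MPI_IrecvW.hpp"

void ping(MPI_Comm comm)
{
	int rank, nproc;
	MPI_Comm_rank(comm, &rank);
	MPI_Comm_size(comm, &nproc);
	if (nproc < 2) return;                   // needs at least two ranks

	double payload = 1.0, inbox = 0.0;
	MPI_Request req;

	if (rank == 0)
	{
		// Send sizeof(payload) bytes to rank 1 with an illustrative tag.
		MPI_IsendWB::send(1, 42, &payload, sizeof(payload), req, comm);
		MPI_Wait(&req, MPI_STATUS_IGNORE);
	}
	else if (rank == 1)
	{
		// Matching receive on the same communicator and tag.
		MPI_IrecvWB::recv(0, 42, &inbox, sizeof(inbox), req, comm);
		MPI_Wait(&req, MPI_STATUS_IGNORE);
	}
}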
src/VCluster/VCluster.cpp  (view file @ 8b660cc6)

@@ -8,12 +8,14 @@
 #include "util/print_stack.hpp"
 #include "util/math_util_complex.hpp"

+init_options global_option;
+
 Vcluster<> * global_v_cluster_private_heap = NULL;
 Vcluster<CudaMemory> * global_v_cluster_private_cuda = NULL;

 //
 std::vector<int> sieve_spf;

 // number of vcluster instances
 size_t n_vcluster = 0;
 bool ofp_initialized = false;
src/VCluster/VCluster.hpp  (view file @ 8b660cc6)

@@ -336,8 +336,8 @@ class Vcluster: public Vcluster_base<InternalMemory>
 	 * \param argv main set of arguments
 	 *
 	 */
-	Vcluster(int *argc, char ***argv)
-	:Vcluster_base<InternalMemory>(argc,argv)
+	Vcluster(int *argc, char ***argv, MPI_Comm ext_comm = MPI_COMM_WORLD)
+	:Vcluster_base<InternalMemory>(argc,argv,ext_comm)
 	{
 	}

@@ -878,34 +878,123 @@ class Vcluster: public Vcluster_base<InternalMemory>
 };

+enum init_options
+{
+	none = 0x0,
+	in_situ_visualization = 0x1,
+};
+
+extern init_options global_option;
+
 // Function to initialize the global VCluster //

 extern Vcluster<> * global_v_cluster_private_heap;
 extern Vcluster<CudaMemory> * global_v_cluster_private_cuda;

+static inline void delete_global_v_cluster_private()
+{
+	delete global_v_cluster_private_heap;
+	delete global_v_cluster_private_cuda;
+}
+
+/*! \brief Finalize the library
+ *
+ * This function MUST be called at the end of the program
+ *
+ */
+static inline void openfpm_finalize()
+{
+	if (global_option == init_options::in_situ_visualization)
+	{
+		MPI_Request bar_req;
+		MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
+	}
+
+#ifdef HAVE_PETSC
+	PetscFinalize();
+#endif
+
+	delete_global_v_cluster_private();
+	ofp_initialized = false;
+
+#ifdef CUDA_GPU
+	// Release memory
+	mem_tmp.destroy();
+	mem_tmp.decRef();
+#endif
+}
+
 /*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
  *
  * Initialize a global instance of Runtime Virtual Cluster Machine
  *
  */
-static inline void init_global_v_cluster_private(int *argc, char ***argv)
+static inline void init_global_v_cluster_private(int *argc, char ***argv, init_options option)
 {
-	if (global_v_cluster_private_heap == NULL)
-	{global_v_cluster_private_heap = new Vcluster<>(argc,argv);}
-
-	if (global_v_cluster_private_cuda == NULL)
-	{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
-}
-
-static inline void delete_global_v_cluster_private()
-{
-	delete global_v_cluster_private_heap;
-	delete global_v_cluster_private_cuda;
-}
+	global_option = option;
+
+	if (option == init_options::in_situ_visualization)
+	{
+		int flag;
+		MPI_Initialized(&flag);
+
+		if (flag == false)
+		{MPI_Init(argc,argv);}
+
+		MPI_Comm com_compute;
+
+		int rank;
+		MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+		if (rank == 0)
+		{MPI_Comm_split(MPI_COMM_WORLD,MPI_UNDEFINED,rank,&com_compute);}
+		else
+		{MPI_Comm_split(MPI_COMM_WORLD,0,rank,&com_compute);}
+
+		if (rank != 0)
+		{
+			if (global_v_cluster_private_heap == NULL)
+			{global_v_cluster_private_heap = new Vcluster<>(argc,argv,com_compute);}
+
+			if (global_v_cluster_private_cuda == NULL)
+			{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv,com_compute);}
+		}
+		else
+		{
+			int flag = false;
+			MPI_Request bar_req;
+			MPI_Ibarrier(MPI_COMM_WORLD,&bar_req);
+
+			//! barrier status
+			MPI_Status bar_stat;
+
+			while (flag == false)
+			{
+				std::cout << "I am node " << rank << std::endl;
+				sleep(1);
+				MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat));
+			}
+
+			openfpm_finalize();
+			exit(0);
+		}
+	}
+	else
+	{
+		if (global_v_cluster_private_heap == NULL)
+		{global_v_cluster_private_heap = new Vcluster<>(argc,argv);}
+
+		if (global_v_cluster_private_cuda == NULL)
+		{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
+	}
+}

 template<typename Memory>
 struct get_vcl
 {

@@ -951,7 +1040,7 @@ static inline bool is_openfpm_init()
  * This function MUST be called before any other function
  *
  */
-static inline void openfpm_init(int *argc, char ***argv)
+static inline void openfpm_init(int *argc, char ***argv, init_options option = init_options::none)
 {
 #ifdef HAVE_PETSC

@@ -959,7 +1048,7 @@ static inline void openfpm_init(int *argc, char ***argv)
 #endif

-	init_global_v_cluster_private(argc,argv);
+	init_global_v_cluster_private(argc,argv,option);

 #ifdef SE_CLASS1
 	std::cout << "OpenFPM is compiled with debug mode LEVEL:1. Remember to remove SE_CLASS1 when you go in production" << std::endl;

@@ -1000,30 +1089,6 @@ static inline void openfpm_init(int *argc, char ***argv)
 }

-/*! \brief Finalize the library
- *
- * This function MUST be called at the end of the program
- *
- */
-static inline void openfpm_finalize()
-{
-#ifdef HAVE_PETSC
-	PetscFinalize();
-#endif
-
-	delete_global_v_cluster_private();
-	ofp_initialized = false;
-
-#ifdef CUDA_GPU
-	// Release memory
-	mem_tmp.destroy();
-	mem_tmp.decRef();
-#endif
-}

 #endif
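The communicator split performed by init_global_v_cluster_private() when in_situ_visualization is requested can be reproduced in isolation. A minimal standalone sketch (plain MPI, not OpenFPM code): rank 0 opts out of the compute communicator via MPI_UNDEFINED and would act as the visualization node, while the remaining ranks obtain com_compute, the communicator the diff hands to Vcluster.

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	MPI_Comm com_compute;
	if (rank == 0)
		MPI_Comm_split(MPI_COMM_WORLD, MPI_UNDEFINED, rank, &com_compute); // visualization node gets MPI_COMM_NULL
	else
		MPI_Comm_split(MPI_COMM_WORLD, 0, rank, &com_compute);             // compute ranks share color 0

	if (rank != 0)
	{
		int csize;
		MPI_Comm_size(com_compute, &csize);
		printf("rank %d is one of %d compute ranks\n", rank, csize);
		MPI_Comm_free(&com_compute);
	}

	MPI_Finalize();
	return 0;
}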
src/VCluster/VCluster_base.hpp  (view file @ 8b660cc6)

@@ -112,6 +112,9 @@ union red
 template<typename InternalMemory>
 class Vcluster_base
 {
+	//! external communicator
+	MPI_Comm ext_comm;
+
 	//! log file
 	Vcluster_log log;

@@ -238,8 +241,8 @@ public:
 	 * \param argv pointer to arguments vector passed to the program
 	 *
 	 */
-	Vcluster_base(int *argc, char ***argv)
-	:NBX_cnt(0)
+	Vcluster_base(int *argc, char ***argv, MPI_Comm ext_comm)
+	:ext_comm(ext_comm),NBX_cnt(0)
 	{
 #ifdef SE_CLASS2
 		check_new(this,8,VCLUSTER_EVENT,PRJ_VCLUSTER);

@@ -259,7 +262,7 @@ public:
 		// We try to get the local processors rank
 		MPI_Comm shmcomm;
-		MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
+		MPI_Comm_split_type(ext_comm, MPI_COMM_TYPE_SHARED, 0,
 		                    MPI_INFO_NULL, &shmcomm);
 		MPI_Comm_rank(shmcomm, &shmrank);

@@ -268,8 +271,8 @@ public:
 		// Get the total number of process
 		// and the rank of this process
-		MPI_Comm_size(MPI_COMM_WORLD, &m_size);
-		MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
+		MPI_Comm_size(ext_comm, &m_size);
+		MPI_Comm_rank(ext_comm, &m_rank);

 #ifdef SE_CLASS2
 		process_v_cl = m_rank;

@@ -379,7 +382,7 @@ public:
 	 */
 	MPI_Comm getMPIComm()
 	{
-		return MPI_COMM_WORLD;
+		return ext_comm;
 	}

 	/*! \brief Get the total number of processors

All remaining communication calls inside Vcluster_base now run on ext_comm instead of MPI_COMM_WORLD and forward it to the MPI wrappers:

- MPI_IallreduceW<T>::reduce(num,MPI_SUM/MPI_MAX/MPI_MIN,req.last(),ext_comm)   (@@ -449, -468, -488)
- MPI_Issend(ptr[i],sz[i],MPI_BYTE,prc[i],SEND_SPARSE + NBX_cnt*131072 + i,ext_comm,&req.last())   (@@ -831)
- MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG,ext_comm,&stat,&stat_t) and MPI_Recv(ptr,msize,MPI_BYTE,stat_t.MPI_SOURCE,stat_t.MPI_TAG,ext_comm,&stat_t)   (@@ -852, -878)
- MPI_Ibarrier(ext_comm,&bar_req)   (@@ -898)
- MPI_IsendWB::send(proc,SEND_RECV_BASE + tag,mem,sz,req.last(),ext_comm) and MPI_IsendW<T,Mem,gr>::send(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm)   (@@ -946, -981)
- MPI_IrecvWB::recv(proc,SEND_RECV_BASE + tag,v,sz,req.last(),ext_comm) and MPI_IrecvW<T>::recv(proc,SEND_RECV_BASE + tag,v,req.last(),ext_comm)   (@@ -1012, -1046)
- MPI_IAllGatherW<T>::gather(&send,1,v.getPointer(),1,req.last(),ext_comm)   (@@ -1076)
- b_cast_helper<...>::bcast_(req,v,root,ext_comm)   (@@ -1104)
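Since getMPIComm() now returns ext_comm rather than MPI_COMM_WORLD, code that mixes Vcluster with raw MPI keeps working when the cluster was built on a split communicator. A short hedged sketch: create_vcluster() and getMPIComm() appear in the diffs above, the MPI_Barrier call is just an illustration.

#include <mpi.h>
#include "VCluster/VCluster.hpp"

void barrier_on_cluster_comm()
{
	Vcluster<> & v_cl = create_vcluster();   // assumes openfpm_init() was already called
	MPI_Comm comm = v_cl.getMPIComm();       // ext_comm, not necessarily MPI_COMM_WORLD
	MPI_Barrier(comm);                       // raw MPI call on the same communicator
}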
src/VCluster/VCluster_unit_test_util.hpp  (view file @ 8b660cc6)

@@ -217,10 +217,8 @@ template<unsigned int ip> void test_no_send_some_peer()
 	}
 }

-template<unsigned int ip> void test_known()
+template<unsigned int ip> void test_known(Vcluster<> & vcl)
 {
-	Vcluster<> & vcl = create_vcluster();
-
 	// send/recv messages
 	global_rank = vcl.getProcessUnitID();

@@ -325,10 +323,8 @@ template<unsigned int ip> void test_known()
 	}
 }

-template<unsigned int ip> void test(unsigned int opt)
+template<unsigned int ip> void test(Vcluster<> & vcl, unsigned int opt)
 {
-	Vcluster<> & vcl = create_vcluster();
-
 	// send/recv messages
 	global_rank = vcl.getProcessUnitID();
src/VCluster/VCluster_unit_tests.cpp  (view file @ 8b660cc6)

@@ -191,7 +191,9 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv)
 	std::cout << "VCluster unit test start sendrecv" << "\n";

 	totp_check = false;
-	test<NBX>(RECEIVE_UNKNOWN);
+	auto & v_cl = create_vcluster();
+
+	test<NBX>(v_cl,RECEIVE_UNKNOWN);

 	totp_check = false;
 	test_no_send_some_peer<NBX>();

@@ -203,8 +205,10 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv_size_known)
 {
 	std::cout << "VCluster unit test start sendrecv known size" << "\n";

+	auto & v_cl = create_vcluster();
+
 	totp_check = false;
-	test<NBX>(RECEIVE_SIZE_UNKNOWN);
+	test<NBX>(v_cl,RECEIVE_SIZE_UNKNOWN);

 	totp_check = false;
 	test_no_send_some_peer<NBX>();

@@ -216,12 +220,32 @@ BOOST_AUTO_TEST_CASE( VCluster_use_sendrecv_known )
 {
 	std::cout << "VCluster unit test start known" << "\n";

-	test_known<NBX>();
+	test_known<NBX>(create_vcluster());

 	std::cout << "VCluster unit test stop known" << "\n";
 }

+BOOST_AUTO_TEST_CASE( VCluster_communicator_with_external_communicator )
+{
+	std::cout << "VCluster unit test external communicator start" << std::endl;
+
+	MPI_Comm com_compute;
+
+	int rank = create_vcluster().rank();
+
+	if (rank == 0)
+	{MPI_Comm_split(MPI_COMM_WORLD,MPI_UNDEFINED,rank,&com_compute);}
+	else
+	{MPI_Comm_split(MPI_COMM_WORLD,0,rank,&com_compute);}
+
+	if (rank != 0)
+	{
+		Vcluster<> v_cl(&boost::unit_test::framework::master_test_suite().argc,
+		                &boost::unit_test::framework::master_test_suite().argv,
+		                com_compute);
+
+		test_known<NBX>(v_cl);
+		test<NBX>(v_cl,RECEIVE_SIZE_UNKNOWN);
+	}
+
+	std::cout << "VCluster unit test external communicator stop" << std::endl;
+}
+
 BOOST_AUTO_TEST_SUITE_END()