Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
O
openfpm_vcluster
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Locked Files
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
Requirements
Requirements
List
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Security & Compliance
Security & Compliance
Dependency List
License Compliance
Operations
Operations
Environments
Analytics
Analytics
CI / CD
Code Review
Insights
Issue
Repository
Value Stream
Wiki
Wiki
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
openfpm
openfpm_vcluster
Commits
6617678c
Commit
6617678c
authored
Jul 11, 2015
by
incardon
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Fixing 256 processors
parent
3bd67863
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
10 additions
and
10 deletions
+10
-10
build_vcluster.sh
build_vcluster.sh
+5
-5
configure.ac
configure.ac
+1
-1
src/VCluster.hpp
src/VCluster.hpp
+4
-4
No files found.
build_vcluster.sh
View file @
6617678c
...
...
@@ -77,15 +77,15 @@ then
make
if [ $? -ne 0 ]; then exit 1 ; fi
salloc --nodes=1 --ntasks-per-node=16 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 16 src/vcluster
salloc -n 16 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 16 src/vcluster
if [ $? -ne 0 ]; then exit 1 ; fi
salloc --nodes=2 --ntasks-per-node=16 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 32 src/vcluster
salloc -n=32 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 32 src/vcluster
if [ $? -ne 0 ]; then exit 1 ; fi
salloc --nodes=4 --ntasks-per-node=16 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 64 src/vcluster
salloc -n=64 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 64 src/vcluster
if [ $? -ne 0 ]; then exit 1 ; fi
salloc --nodes=8 --ntasks-per-node=16 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 128 src/vcluster
salloc -n=128 --time=00:05:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 128 src/vcluster
if [ $? -ne 0 ]; then exit 1 ; fi
salloc --nodes=16 --ntasks-per-node=16 --time=00:5:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 256 src/vcluster
salloc -n=256 --time=00:5:00 --mem-per-cpu=1900 --partition=sandy mpirun -np 256 src/vcluster
if [ $? -ne 0 ]; then exit 1 ; fi
else
...
...
configure.ac
View file @
6617678c
...
...
@@ -100,7 +100,7 @@ AC_CHECK_LIB(rt, clock_gettime,[DEFAULT_LIB+=" -lrt "],[],[])
##### CHECK FOR BOOST ##############
AX_BOOST([1.41],[],[])
AX_BOOST([1.52],[],[])
####### Checking for GPU support
...
...
src/VCluster.hpp
View file @
6617678c
...
...
@@ -453,6 +453,10 @@ public:
sendrecvMultipleMessagesPCX(prc.size(),(size_t *)map.getPointer(),(size_t *)sz_send.getPointer(),(size_t *)prc.getPointer(),(void **)ptr_send.getPointer(),msg_alloc,ptr_arg,opt);
}
MPI_Request bar_req;
// barrier status
MPI_Status bar_stat;
/*! \brief Send and receive multiple messages local
*
* It send multiple messages to the near processor the and receive
...
...
@@ -508,7 +512,6 @@ public:
int flag = false;
bool reached_bar_req = false;
MPI_Request bar_req;
log.start(10);
...
...
@@ -556,9 +559,6 @@ public:
{MPI_SAFE_CALL(MPI_Ibarrier(MPI_COMM_WORLD,&bar_req)); reached_bar_req = true;}
}
// barrier status
MPI_Status bar_stat;
// Check if all processor reached the async barrier
if (reached_bar_req)
{MPI_SAFE_CALL(MPI_Test(&bar_req,&flag,&bar_stat))};
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment