openfpm_pdata (Sbalzarini Lab / Software / Parallel Computing / OpenFPM)

Commit 1d8d43cb, authored Feb 02, 2015 by incardon
Parent 2f7b6f8f
Changes: 11 files

    Fixed cartesian decomposition
src/CartesianGraphFactory_unit_test.hpp

@@ -47,10 +47,7 @@ BOOST_AUTO_TEST_CASE( CartesianGraphFactory_use)
 	CartesianGraphFactory<3,Graph_CSR<Point_test<float>,Point_test<float>>> g_factory;

 	// Cartesian grid
-	std::vector<size_t> sz;
-	sz.push_back(GS_SIZE);
-	sz.push_back(GS_SIZE);
-	sz.push_back(GS_SIZE);
+	size_t sz[3] = {GS_SIZE,GS_SIZE,GS_SIZE};

 	// Box
 	Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});
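The test now builds the grid size as a plain C array rather than a std::vector. A minimal stand-alone sketch (hypothetical function name, not OpenFPM code) of why this matters for the factory change that follows: the array length is part of the type, so the dimensionality is available at compile time.

	#include <cstddef>

	// Mirrors the style of the new construct() signature: a reference to
	// an array deduces its length as a template parameter.
	template<unsigned int dim> void takes_grid_size(std::size_t (& sz)[dim]) {}

	int main()
	{
		std::size_t sz[3] = {8,8,8};
		takes_grid_size(sz);    // dim deduced as 3
		// std::vector<std::size_t> v(3);
		// takes_grid_size(v);  // would not compile: length not in the type
	}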
src/Graph/CartesianGraphFactory.hpp

@@ -84,16 +84,8 @@ class Graph_constructor_impl
 {
 public:

 	//! Construct cartesian graph
-	static Graph construct(std::vector<size_t> sz, Box<dim,T> dom)
+	static Graph construct(size_t (& sz)[dim], Box<dim,T> dom)
 	{
-#ifdef DEBUG
-		//! The size is wrong signal it
-		if (sz.size() != dim)
-		{std::cerr << "Error this factory has been specialized for catesian grid of dimension " << dim << "\n";}
-#endif
 		// Calculate the size of the hyper-cubes on each dimension
 		T szd[dim];
@@ -202,16 +194,8 @@ class Graph_constructor_impl<dim,Graph,NO_EDGE,T,dim_c,pos...>
 {
 public:

 	//! Construct cartesian graph
-	static Graph construct(std::vector<size_t> sz, Box<dim,T> dom)
+	static Graph construct(size_t (& sz)[dim], Box<dim,T> dom)
 	{
-#ifdef DEBUG
-		//! The size is wrong signal it
-		if (sz.size() != dim)
-		{std::cerr << "Error this factory has been specialized for catesian grid of dimension " << dim << "\n";}
-#endif
 		// Calculate the size of the hyper-cubes on each dimension
 		T szd[dim];
@@ -385,7 +369,7 @@ public:
 	 *
 	 */
 	template<unsigned int se, typename T, unsigned int dim_c, int... pos>
-	static Graph construct(std::vector<size_t> sz, Box<dim,T> dom)
+	static Graph construct(size_t (& sz)[dim], Box<dim,T> dom)
 	{
 		return Graph_constructor_impl<dim,Graph,se,T,dim_c,pos...>::construct(sz,dom);
 	}
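Because the new signature fixes the array length to dim, a caller passing the wrong number of sizes is now rejected by the compiler, which is why the runtime check under #ifdef DEBUG could be dropped. A hypothetical illustration (invented names, not the library's code):

	#include <cstddef>

	template<unsigned int dim> struct factory
	{
		// Only arrays of exactly dim elements bind to this parameter
		static void construct(std::size_t (& sz)[dim]) {}
	};

	int main()
	{
		std::size_t ok[3] = {8,8,8};
		factory<3>::construct(ok);      // compiles
		// std::size_t bad[2] = {8,8};
		// factory<3>::construct(bad);  // compile error, no runtime check needed
	}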
src/dec_optimizer.hpp
@@ -59,17 +59,13 @@ public:
 	 *
 	 */
-	template<typename encap> static Box<dim,size_t> getBox(encap && enc)
+	template<typename encap> static Box<dim,size_t> getBox(const encap && enc)
 	{
 		Box<dim,size_t> bx;

 		// Create the object from the encapsulation
-		for (int i = 0 ; i < dim ; i++)
-		{
-			bx.setHigh(i,enc.template get<wavefront::start>()[i]);
-			bx.setLow(i,enc.template get<wavefront::stop>()[i]);
-		}
+		getBox(enc,bx);

 		return bx;
 	}
@@ -86,8 +82,8 @@ public:
 		for (int i = 0 ; i < dim ; i++)
 		{
-			bx.setHigh(i,enc.template get<wavefront::start>()[i]);
-			bx.setLow(i,enc.template get<wavefront::stop>()[i]);
+			bx.setLow(i,enc.template get<wavefront::start>()[i]);
+			bx.setHigh(i,enc.template get<wavefront::stop>()[i]);
 		}
 	}
 };
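The swap above is the substance of the fix: the wavefront start must map to the low corner of the Box and the stop to the high corner. A stand-alone sketch (hypothetical box type, not the library's Box) of what the inverted assignment produced:

	#include <iostream>

	struct simple_box
	{
		int low[2], high[2];
		// a box is well formed only if every extent is non-negative
		bool valid() const
		{ return high[0] >= low[0] && high[1] >= low[1]; }
	};

	int main()
	{
		simple_box fixed{{0,0},{4,4}};    // start -> low, stop -> high
		simple_box broken{{4,4},{0,0}};   // the old, inverted assignment
		std::cout << fixed.valid() << " " << broken.valid() << "\n";  // 1 0
	}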
@@ -160,19 +156,23 @@ private:
 			// get the vertex and set the sub id
 			graph.vertex(gh.LinId(gk)).template get<p_sub>() = ids;

 			// next subdomain
 			++g_sub;
 		}
 	}

-	/* \brief Add the boundary domain to the queue
+	/* \brief Add the boundary domain of id p_id to the queue
 	 *
 	 * \tparam i-property where is stored the decomposition
 	 *
 	 * \param domains vector with domains to process
 	 * \param graph we are processing
 	 * \param w_comb hyper-cube combinations
+	 * \param p_id processor id
 	 *
 	 */
-	template<unsigned int p_sub> void add_to_queue(openfpm::vector<size_t> & domains, openfpm::vector<wavefront<dim>> & v_w, Graph & graph, std::vector<comb<dim>> & w_comb)
+	template<unsigned int p_sub, unsigned int p_id> void add_to_queue(openfpm::vector<size_t> & domains, openfpm::vector<wavefront<dim>> & v_w, Graph & graph, std::vector<comb<dim>> & w_comb, long int pr_id)
 	{
 		// create a new queue
 		openfpm::vector<size_t> domains_new;
@@ -201,7 +201,7 @@ private:
 			}
 		}

-		// for each expanded wavefront create a sub-grid iterator and add the subbomain
+		// for each expanded wavefront create a sub-grid iterator and add the sub-domain
 		for (int d = 0 ; d < v_w.size() ; d++)
 		{
@@ -215,19 +215,27 @@ private:
 			// get the actual key
 			const grid_key_dx<dim> & gk = g_sub.get();

-			// get the vertex and check if is not assigned add to the queue
+			// get the vertex and if does not have a sub-id and is assigned ...
 			if (graph.vertex(gh.LinId(gk)).template get<p_sub>() < 0)
 			{
-				// add
-				domains_new.add(gh.LinId(gk));
+				// ... and the p_id different from -1
+				if (pr_id != -1)
+				{
+					// ... and the processor id of the sub-domain match p_id, add to the queue
+					if (pr_id == graph.vertex(gh.LinId(gk)).template get<p_id>())
+						domains_new.add(gh.LinId(gk));
+				}
+				else
+					domains_new.add(gh.LinId(gk));
 			}

 			++g_sub;
 		}
 	}

 		// copy the new queue to the old one (it not copied, C++11 move semantic)
-		domains = domains_new;
+		domains.swap(domains_new);
 	}

 	/* \brief Find the biggest hyper-cube
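The rules added above are: a vertex enters the new queue only if it has no sub-id yet, and, when pr_id is not -1, only if its processor id matches; the rebuilt queue is then swapped into domains instead of being copied element by element. A stand-alone sketch of the same filtering logic (plain std types and invented names, not the OpenFPM graph API):

	#include <vector>
	#include <cstdio>

	struct vert { long sub; long proc; };

	std::vector<size_t> filter(const std::vector<vert> & g, long pr_id)
	{
		std::vector<size_t> q;
		for (size_t v = 0 ; v < g.size() ; v++)
		{
			if (g[v].sub >= 0) continue;             // already has a sub-id
			if (pr_id == -1 || g[v].proc == pr_id)   // -1 acts as a wildcard
				q.push_back(v);
		}
		return q;
	}

	int main()
	{
		std::vector<vert> g = {{-1,0},{-1,1},{3,1}};
		printf("%zu\n", filter(g,1).size());   // 1: unassigned vertex on proc 1
		printf("%zu\n", filter(g,-1).size());  // 2: all unassigned vertices
	}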
@@ -274,18 +282,16 @@ private:
 		// flag to indicate if the wavefront can expand
 		bool w_can_expand = true;

-		// Create an iterator of the wavefront
-		grid_key_dx_iterator_sub<dim> it(gh,v_w.template get<wavefront<dim>::start>(d),v_w.template get<wavefront<dim>::stop>(d));
+		// Create an iterator of the expanded wavefront
+		grid_key_dx<dim> start = grid_key_dx<dim>(v_w.template get<wavefront<dim>::start>(d)) + w_comb[d];
+		grid_key_dx<dim> stop = grid_key_dx<dim>(v_w.template get<wavefront<dim>::stop>(d)) + w_comb[d];
+		grid_key_dx_iterator_sub<dim> it(gh,start,stop);

 		// for each subdomain
 		while (it.isNext())
 		{
 			// get the wavefront sub-domain id
 			size_t sub_w = gh.LinId(it.get());

-			// get the sub-domain id of the expanded wavefront
-			grid_key_dx<dim> k_exp = it.get() + w_comb[d];
-			size_t sub_w_e = gh.LinId(k_exp);
+			size_t sub_w_e = gh.LinId(it.get());

 			// we get the processor id of the neighborhood sub-domain on direction d
 			size_t exp_p = graph.vertex(sub_w_e).template get<p_id>();
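The rewrite above shifts the iteration range itself instead of shifting every key: start and stop are moved once by the combination w_comb[d], and the iterator then walks the expanded wavefront directly. A one-dimensional stand-alone analogue of that change:

	#include <cstdio>

	int main()
	{
		int start = 2, stop = 5, comb = 1;   // wavefront bounds and direction offset
		// old style: iterate start..stop and shift each key by comb
		// new style: shift the bounds once, iterate the expanded range
		for (int k = start + comb ; k <= stop + comb ; k++)
			printf("%d ", k);                // 3 4 5 6
		printf("\n");
	}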
@@ -347,25 +353,13 @@ private:
 			for (int s = 0 ; s < dim ; s++)
 			{
 				if (is_pos == true)
-				{v_w.template get<wavefront<dim>::stop>(id)[j] = v_w.template get<wavefront<dim>::stop>(id)[j] + w_comb[id].c[j];}
+				{v_w.template get<wavefront<dim>::stop>(id)[s] = v_w.template get<wavefront<dim>::stop>(id)[s] + w_comb[d].c[s];}
 				else
-				{v_w.template get<wavefront<dim>::start>(id)[j] = v_w.template get<wavefront<dim>::start>(id)[j] + w_comb[id].c[j];}
+				{v_w.template get<wavefront<dim>::start>(id)[s] = v_w.template get<wavefront<dim>::start>(id)[s] + w_comb[d].c[s];}
 			}
 		}
 	}

-		// Debug output the wavefront graph for debug
-		// duplicate the graph
-		Graph g_debug = graph.duplicate();
-		write_wavefront<nm_part_v::id>(g_debug,v_w);
-		VTKWriter<Graph> vtk(g_debug);
-		vtk.template write<1>("vtk_debug.vtk");
-		////////////////////////////////////
 	}

 	// get back the hyper-cube produced
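The first change in this hunk fixes loop indexing: inside for (int s = 0 ; s < dim ; s++) the wavefront components must be addressed with s and the combination with the wavefront index d, while the old code reused j and id from enclosing scopes. A trivial stand-alone check of the corrected per-component update:

	#include <cstdio>

	int main()
	{
		int stop[3] = {4,4,4};
		int c[3]    = {0,1,0};   // combination: expand along axis 1 only

		for (int s = 0 ; s < 3 ; s++)
			stop[s] += c[s];     // per-component update with the right index

		printf("%d %d %d\n", stop[0], stop[1], stop[2]);  // 4 5 4
	}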
@@ -373,20 +367,31 @@ private:
 		for (int i = 0 ; i < dim ; i++)
 		{
+			// get the index of the wavefront direction
+			int w = 0;
+
 			size_t p_f = hyp.positiveFace(i);
 			size_t n_f = hyp.negativeFace(i);

-			// set the box
-			box.setHigh(i,v_w.template get<wavefront<dim>::stop>(p_f)[i]);
-			box.setLow(i,v_w.template get<wavefront<dim>::start>(n_f)[i]);
 		}
 	}

 	/*! \brief Initialize the wavefront
 	 *
 	 * \param v_w Wavefront to initialize
 	 *
 	 */
 	void InitializeWavefront(grid_key_dx<dim> & start_p, openfpm::vector<wavefront<dim>> & v_w)
 	{
 		// Wavefront to initialize

 		for (int i = 0 ; i < v_w.size() ; i++)
 		{
 			for (int j = 0 ; j < dim ; j++)
 			{
+				if (w_comb[i].c[j] == 1)
+				{w = j; break;}
+
 				v_w.template get<wavefront<dim>::start>(i)[j] = start_p.get(j);
 				v_w.template get<wavefront<dim>::stop>(i)[j] = start_p.get(j);
 			}
+
+			// set the box
+			box.setHigh(i,v_w.template get<wavefront<dim>::stop>(i)[w]);
+			box.setLow(i,v_w.template get<wavefront<dim>::start>(i)[w]);
 		}
 	}
@@ -399,7 +404,7 @@ public:
 	 *
 	 */
-	dec_optimizer(Graph & g, std::vector<size_t> sz)
+	dec_optimizer(Graph & g, size_t (& sz)[dim])
 	:gh(sz)
 	{
 		// The graph g is suppose to represent a cartesian grid
@@ -411,7 +416,7 @@ public:
 	 * Starting from a domain (hyper-cubic), it create wavefront at the boundary and expand
 	 * the boundary until the wavefronts cannot expand any more.
 	 * To the domains inside the hyper-cube one sub-id is assigned. This procedure continue until
-	 * all the domain of one id has a sub-id
+	 * all the domain of one p_id has a sub-id
 	 *
 	 * \tparam j property containing the decomposition
 	 * \tparam i property to fill with the sub-decomposition
@@ -422,6 +427,32 @@ public:
 	 */
 	template<unsigned int p_sub, unsigned int p_id> void optimize(grid_key_dx<dim> & start_p, Graph & graph)
 	{
+		// temporal vector
+		openfpm::vector<Box<dim,size_t>> tmp;
+
+		// optimize
+		optimize<p_sub,p_id>(start_p,graph,-1,tmp);
+	}
+
+	/*! \brief optimize the graph
+	 *
+	 * Starting from a domain (hyper-cubic), it create wavefront at the boundary and expand
+	 * the boundary until the wavefronts cannot expand any more.
+	 * To the domains inside the hyper-cube one sub-id is assigned. This procedure continue until
+	 * all the domain of one p_id has a sub-id
+	 *
+	 * \tparam j property containing the decomposition
+	 * \tparam i property to fill with the sub-decomposition
+	 *
+	 * \param Seed point
+	 * \param graph we are processing
+	 * \param p_id Processor id (if p_id == -1 the optimization is done for all the processors)
+	 * \param list of sub-domain boxes
+	 *
+	 */
+	template<unsigned int p_sub, unsigned int p_id> void optimize(grid_key_dx<dim> & start_p, Graph & graph, long int pr_id, openfpm::vector<Box<dim,size_t>> & lb)
+	{
 		// sub-domain id
 		size_t sub_id = 0;
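The new short optimize() overload above forwards to the full version with pr_id = -1 and a throw-away box list. A stand-alone sketch of this convenience-overload pattern (invented names, not the library's API):

	#include <vector>

	struct optimizer
	{
		// full version: restrict the search to pr_id and collect the boxes
		template<unsigned int p_sub, unsigned int p_id>
		void optimize(long int pr_id, std::vector<int> & lb)
		{ /* wavefront expansion would run here */ }

		// convenience version: all processors, results discarded
		template<unsigned int p_sub, unsigned int p_id>
		void optimize()
		{
			std::vector<int> tmp;           // temporal vector
			optimize<p_sub,p_id>(-1,tmp);   // -1 acts as a wildcard processor id
		}
	};

	int main()
	{
		optimizer o;
		o.optimize<0,1>();
	}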
@@ -438,19 +469,10 @@
 		// wavefronts
 		openfpm::vector<wavefront<dim>> v_w(w_comb.size());

-		// Initialize the wavefronts from the domain start_p
-		for (int i = 0 ; i < v_w.size() ; i++)
-		{
-			for (int j = 0 ; j < dim ; j++)
-			{
-				v_w.template get<wavefront<dim>::start>(i)[j] = start_p.get(start_p.get(j));
-				v_w.template get<wavefront<dim>::stop>(i)[j] = start_p.get(start_p.get(j));
-			}
-		}

 		// fill the sub decomposition with negative number
 		fill_domain<p_sub>(graph,gh.getBox(),-1);

 		// push the first domain
 		v_q.add(gh.LinId(start_p));

 		while (v_q.size() != 0)
@@ -458,14 +480,23 @@
 			// Box
 			Box<dim,size_t> box;

+			// Get the grid_key position from the linearized id
+			start_p = gh.InvLinId(v_q.get(0));
+
+			// Initialize the wavefronts from the domain start_p
+			InitializeWavefront(start_p,v_w);
+
 			// Create the biggest box containing the domain
-			expand_from_point<p_sub,p_id>(gh.LinId(start_p),graph,box,v_w,w_comb);
+			expand_from_point<p_sub,p_id>(gh.LinId(v_q.get(0)),graph,box,v_w,w_comb);

+			// Add the created box to the list of boxes
+			lb.add(box);
+
 			// fill the domain
 			fill_domain<p_sub>(graph,box,sub_id);

 			// create the queue
-			add_to_queue<p_sub>(v_q,v_w,graph,w_comb);
+			add_to_queue<p_sub,p_id>(v_q,v_w,graph,w_comb,pr_id);

 			// increment the sub_id
 			sub_id++;
src/dec_optimizer_unit_test.hpp

@@ -14,6 +14,7 @@
 #include "metis_util.hpp"
 #include "dec_optimizer.hpp"

+#undef GS_SIZE
 #define GS_SIZE 8
@@ -25,10 +26,7 @@ BOOST_AUTO_TEST_CASE( dec_optimizer_test_use)
 	CartesianGraphFactory<3,Graph_CSR<nm_part_v,nm_part_e>> g_factory_part;

 	// Cartesian grid
-	std::vector<size_t> sz;
-	sz.push_back(GS_SIZE);
-	sz.push_back(GS_SIZE);
-	sz.push_back(GS_SIZE);
+	size_t sz[3] = {GS_SIZE,GS_SIZE,GS_SIZE};

 	// Box
 	Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});
src/gargabe.hpp
@@ -102,4 +102,27 @@
 }

 #ifndef PARALLEL_DECOMPOSITION
 //		CreateSubspaces();
 #endif

+#ifndef USE_METIS_GP
+
+		// Here we do not use METIS
+		// Distribute the divided domains
+
+		// Get the number of processing units
+		size_t Np = v_cl.getProcessingUnits();
+
+		// Get the ID of this processing unit
+		// and push the subspace is taking this
+		// processing unit
+		for (size_t p_id = v_cl.getProcessUnitID() ; p_id < Np ; p_id += Np)
+			id_sub.push_back(p_id);
+
+#else
+
+#endif
+
 #endif

 /* GARGABE_HPP_ */
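The (disabled) block above distributes sub-spaces cyclically: each process starts at its own rank and steps by Np, so with a stride equal to Np every process takes exactly one index. A stand-alone simulation of that assignment (plain C++, no VCluster):

	#include <cstdio>

	int main()
	{
		size_t Np = 4;   // number of processing units
		for (size_t rank = 0 ; rank < Np ; rank++)
			for (size_t p_id = rank ; p_id < Np ; p_id += Np)
				printf("process %zu takes sub-space %zu\n", rank, p_id);
	}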
src/grid_dist.hpp (deleted, 100644 → 0, content shown at parent 2f7b6f8f)
#ifndef COM_UNIT_HPP
#define COM_UNIT_HPP

#include <vector>
#include "Grid/map_grid.hpp"
#include "VCluster.hpp"
#include "Space/SpaceBox.hpp"

/*! \brief Grid key for a distributed grid
 *
 * Grid key for a distributed grid
 *
 */
template<unsigned int dim>
class grid_dist_key_dx
{
	//! grid list counter
	size_t g_c;

	//! Local grid iterator
	grid_key_dx_iterator<dim> a_it;
};

/*! \brief Distributed grid iterator
 *
 * Iterator across the local element of the distributed grid
 *
 */
template<unsigned int dim>
class grid_dist_iterator
{
	//! grid list counter
	size_t g_c;

	//! List of the grids we are going to iterate
	std::vector<grid_key_dx<dim>> & gList;

	//! Actual iterator
	grid_key_dx_iterator<dim> a_it;

public:

	/*! \brief Constructor of the distributed grid
	 *
	 * \param gk std::vector of the local grid
	 *
	 */
	grid_dist_iterator(std::vector<grid_key_dx<dim>> & gk)
	:g_c(0),gList(gk)
	{
		// Initialize with the current iterator
		// with the first grid
		a_it = gList[0].getIterator();
	}

	// Destructor
	~grid_dist_iterator()
	{}

	/*! \brief Get the next element
	 *
	 * \return the next grid_key
	 *
	 */
	grid_key_dx_iterator<dim> operator++()
	{
		a_it++;

		// check if a_it is at the end
		if (a_it.isEnd() == false)
			return *this;
		else
		{
			// switch to the new grid
			g_c++;

			// get the next grid iterator
			a_it = a_it = gList[g_c].getIterator();

			// increment to a valid point
			a_it++;
		}

		return *this;
	}

	/*! \brief Check if there is the next element
	 *
	 * \return true if there is the next, false otherwise
	 *
	 */
	bool isEnd()
	{
		// If there are no other grid stop
		if (g_c >= gList.size())
			return true;
	}

	/*! \brief Get the actual key
	 *
	 * \return the actual key
	 *
	 */
	grid_dist_key_dx<dim> get()
	{
		return a_it;
	}
};

/*! \brief This is a distributed grid
 *
 * Implementation of a distributed grid. A distributed grid is a grid distributed
 * across processors
 *
 * \param dim Dimensionality of the grid
 * \param T type of grid
 * \param Decomposition Class that decompose the grid for example CartDecomposition
 * \param Mem Is the allocator
 * \param device type of base structure is going to store the data
 *
 */
template<unsigned int dim, typename T, typename Decomposition, typename Mem = typename memory_traits_lin<typename T::type>::type, typename device_grid = grid_cpu<dim,T>>
class grid_dist
{
	//! Local grids
	device_grid * loc_grid;

	//! Space Decomposition
	Decomposition dec;

	//! Size of the grid on each dimension
	std::vector<size_t> g_res;

	//! Communicator class
	Vcluster & v_cl;

	/*! \brief Get the grid size
	 *
	 * Get the grid size, given a domain, the resolution on it and another spaceBox
	 * it give the size on all directions of the local grid
	 *
	 * \param sp SpaceBox enclosing the local grid
	 * \param domain Space box enclosing the physical domain or part of it
	 * \param v_size grid size on this physical domain
	 *
	 * \return An std::vector representing the local grid on each dimension
	 *
	 */
	std::vector<size_t> getGridSize(SpaceBox<dim,typename Decomposition::domain_type> & sp, Box<dim,typename Decomposition::domain_type> & domain, std::vector<size_t> & v_size)
	{
		std::vector<size_t> tmp;

		for (size_t d = 0 ; d < dim ; d++)
		{
			//! Get the grid size compared to the domain space and its resolution
			typename Decomposition::domain_type dim_sz = (sp.getHigh(d) - sp.getLow(d)) / ((domain.getHigh(d) - domain.getLow(d)) / v_size[d]) + 0.5;

			// push the size of the local grid
			tmp.push_back(dim_sz);
		}

		return tmp;
	}
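getGridSize above derives the local grid resolution by dividing the local extent by the global cell size, adding 0.5 so the truncation to an integer rounds to nearest. A stand-alone check of the formula on one axis (values invented for illustration):

	#include <cstdio>

	int main()
	{
		float sp_low = 0.25f, sp_high = 0.5f;   // local SpaceBox on one axis
		float dom_low = 0.0f, dom_high = 1.0f;  // physical domain on that axis
		size_t v_size = 16;                     // global grid size on that axis

		float cell = (dom_high - dom_low) / v_size;
		size_t loc = (sp_high - sp_low) / cell + 0.5;  // round to nearest
		printf("%zu\n", loc);                   // 4 cells on the local grid
	}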
public:

	//! constructor
	grid_dist(Vcluster v_cl)
	:loc_grid(NULL),v_cl(v_cl),dec(*global_v_cluster)
	{
	}

	//! constructor
	grid_dist()
	:loc_grid(NULL),v_cl(*global_v_cluster),dec(*global_v_cluster)
	{
	}

	/*! \brief Get the object that store the decomposition information
	 *
	 * \return the decomposition object
	 *
	 */
	Decomposition & getDecomposition()
	{
		return dec;
	}

	/*! \brief Decompose the domain
	 *
	 * Decompose the domain
	 *
	 */
	void Decompose()
	{
		// ! Create an hyper-cube approximation.
		// ! In order to work on grid_dist the decomposition
		// ! has to be a set of hyper-cube
		dec.hyperCube();

		// Get the number of local grid needed
		size_t n_grid = dec.getNHyperCube();

		// create local grids for each hyper-cube
		loc_grid = new device_grid[n_grid];

		// Each processing unit take a space
		// Set the space associated to this process unit
		dec.setSpace(v_cl.getProcessUnitID());