Commit 30f0df95 authored by Pietro Incardona

Distributed Grids with periodic boundary conditions

parent efb84aa7
# Change Log
All notable changes to this project will be documented in this file.
## [0.4.0] -
### Added
- Grid with periodic boundary conditions
- VTK writer for distributed grids, now the default writer
### Fixed
- GPU compilation
### Changed
## [0.3.0] - 16-04-2016
### Added
- Molecular Dynamics example
...@@ -17,7 +29,7 @@ All notable changes to this project will be documented in this file.
- CartDecomposition parameter for the distributed structures is now optional
- template getPos<0>(), substituted by getPos()
## [0.2.1] - 01-04-2016
### Changed
- GoogleChart function names changed: AddPointGraph to AddLinesGraph and AddColumsGraph to AddHistGraph
...
...@@ -124,24 +124,24 @@ int main(int argc, char* argv[])
if (ct.isLocal(vd.getPos(key)) == false)
std::cerr << "Error particle is not local" << "\n";
// set all the properties to some numbers
// scalar
vd.template getProp<0>(key) = 1.0;
vd.template getProp<1>(key)[0] = 1.0;
vd.template getProp<1>(key)[1] = 1.0;
vd.template getProp<1>(key)[2] = 1.0;
vd.template getProp<2>(key)[0][0] = 1.0;
vd.template getProp<2>(key)[0][1] = 1.0;
vd.template getProp<2>(key)[0][2] = 1.0;
vd.template getProp<2>(key)[1][0] = 1.0;
vd.template getProp<2>(key)[1][1] = 1.0;
vd.template getProp<2>(key)[1][2] = 1.0;
vd.template getProp<2>(key)[2][0] = 1.0;
vd.template getProp<2>(key)[2][1] = 1.0;
vd.template getProp<2>(key)[2][2] = 1.0;
cnt++;
...@@ -165,7 +165,7 @@ int main(int argc, char* argv[])
// Output the particle position for each processor
//
vd.write("output",VTK_WRITER);
//
// ### WIKI 10 ###
...
openfpm_data @ 84d42223
Subproject commit 84d4222309a64ba0a9336b339abfc13a958bf4e4
openfpm_devices @ d46372d3
Subproject commit d46372d3db114dd2dc95b3b03d7d9b906287d253
openfpm_io @ bc080489
Subproject commit bc080489d4254f6d91c18d9f08c5cd1cc0f5718f
...@@ -931,11 +931,22 @@ public:
* \return the periodicity in direction i
*
*/
inline size_t periodicity(size_t i)
{
return bc[i];
}
/*! \brief Get the periodicity
*
*
* \return the periodicity
*
*/
inline const size_t (& periodicity() const) [dim]
{
return bc;
}
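The two accessors above expose the boundary conditions of the decomposition. A minimal usage sketch, not part of this commit: it assumes a decomposition object dec providing these methods and the usual PERIODIC constant.

// Sketch only: query the periodicity of a decomposition "dec"
for (size_t i = 0 ; i < dim ; i++)
{
	if (dec.periodicity(i) == PERIODIC)
		std::cout << "direction " << i << " is periodic" << "\n";
}

// or take a reference to the whole boundary-condition array at once
const size_t (& bc_arr)[dim] = dec.periodicity();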
/*! \brief Set the parameters of the decomposition
*
* \param div_ into how many sub-sub-domains to decompose on each dimension
...
...@@ -191,6 +191,14 @@ struct N_box
// near processor sector position (or where they live outside the domain)
openfpm::vector<comb<dim>> pos;
// Number of real sub-domains, i.e. sub-domains in the central sector
size_t n_real_sub;
// When a sub-domain is not in the central sector, it means that it has been created
// because of periodicity in a non-central sector. Any sub-domain not in the central
// sector is linked to one sub-domain in the central sector
openfpm::vector<size_t> r_sub;
//! Default constructor
N_box()
:id((size_t)-1)
...@@ -218,6 +226,8 @@ struct N_box
id = ele.id;
bx = ele.bx;
pos = ele.pos;
n_real_sub = ele.n_real_sub;
r_sub = ele.r_sub;
return * this;
}
...@@ -231,7 +241,9 @@ struct N_box
{
id = ele.id;
bx.swap(ele.bx);
pos.swap(ele.pos);
n_real_sub = ele.n_real_sub;
r_sub.swap(ele.r_sub);
return * this;
}
...@@ -249,6 +261,12 @@ struct N_box
if (pos != ele.pos)
return false;
if (r_sub != ele.r_sub)
return false;
if (n_real_sub != ele.n_real_sub)
return false;
return bx == ele.bx;
}
...
...@@ -110,26 +110,39 @@ class ie_ghost
* Consider Processor 5 sending to processor 6
* its sub-domains, including the one in figure with id 0 in the list, and
* receiving from processor 6 the sub-domain in figure with id 9. Consider also
* that we have 16 processors. E0_9 comes from the intersection of the expanded sub-domain
* 0 with 9 (careful: the id is related to the send and receive position in the list)
* and the intersection is in sector 0
*
* The id of the external box (for processor 5) is calculated as
*
* ((k * N_b + b) * v_cl.getProcessingUnits() + p_id) * openfpm::math::pow(3,dim) + c.lin()
*
* The parameters assume a different meaning depending on whether the formula is used
* for calculating external or internal ghost box ids
*
* \param k expanded sub-domain sent/received to/from p_id ( 0 )
* \param b sub-domain received/sent from/to p_id ( 9 )
* \param p_id processor id ( 6 )
* \param c sector where the sub-domain b lives
* \param N_b number of sub-domains received/sent from/to p_id
* \param v_cl Vcluster
* \param ei indicate whether the formula is used to calculate external (true) or internal (false) ids
*
* \return id of the external/internal ghost
*
* \note For an explanation of the sectors see getShiftVectors
*
*/
inline size_t ebx_ibx_form(size_t k, size_t b, size_t p_id, const comb<dim> & c ,size_t N_b, Vcluster & v_cl, const bool ei)
{
comb<dim> cext = c;
if (ei == true)
cext.sign_flip();
return ((k * N_b + b) * v_cl.getProcessingUnits() + p_id) * openfpm::math::pow(3,dim) + cext.lin();
}
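To make the id construction concrete, here is a hedged worked example, not part of the commit: k = 0, b = 9 and p_id = 6 are the values used in the comment above, while N_b = 10, dim = 3 and a sector code of 13 are assumptions made only to obtain concrete numbers.

// Illustrative only: external ghost id for the example above
size_t k = 0, b = 9, N_b = 10, p_id = 6, n_proc = 16;
size_t sector = 13;                                        // assumed value of cext.lin()
size_t id = ((k * N_b + b) * n_proc + p_id) * 27 + sector; // 27 = 3^dim for dim = 3
// id = ((0*10 + 9)*16 + 6)*27 + 13 = 150*27 + 13 = 4063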
protected:
...@@ -218,9 +231,13 @@ protected:
// used later
Box_dom<dim,T> & proc_int_box_g = proc_int_box.get(nn_p.ProctoID(p_id));
// Number of received sub-domains
size_t n_r_sub = nn_p.getNRealSubdomains(p_id);
// get the set of sub-domains, sector position, and real sub-domain id of the near processor p_id
const openfpm::vector< ::Box<dim,T> > & nn_processor_subdomains_g = nn_p.getNearSubdomains(p_id);
const openfpm::vector< comb<dim> > & nnpsg_pos = nn_p.getNearSubdomainsPos(p_id);
const openfpm::vector< size_t > & r_sub = nn_p.getNearSubdomainsRealId(p_id);
// used later
openfpm::vector< ::Box<dim,T> > & box_nn_processor_int_gg = box_nn_processor_int.get(i).get(j).bx;
...@@ -261,7 +278,7 @@ protected:
// Search where the sub-domain i is in the sent list for processor p_id
size_t k = link_ebx_ibx(nn_p,p_id,i);
proc_int_box_g.ebx.last().id = ebx_ibx_form(k,r_sub.get(b),p_id,nnpsg_pos.get(b),n_r_sub,v_cl,true);
}
}
}
...@@ -296,15 +313,18 @@ protected:
// For each processor contiguous to this sub-domain
for (size_t j = 0 ; j < box_nn_processor.get(i).size() ; j++)
{
// Near processor
size_t p_id = box_nn_processor.get(i).get(j);
// get the set of sub-domains of the near processor p_id
const openfpm::vector< ::Box<dim,T> > & nn_p_box = nn_p.getNearSubdomains(p_id);
// get the sector position for each sub-domain in the list
const openfpm::vector< comb<dim> > nn_p_box_pos = nn_p.getNearSubdomainsPos(p_id);
// get the real sub-domain id for each sub-domain
const openfpm::vector<size_t> r_sub = nn_p.getNearSubdomainsRealId(p_id);
// get the local processor id
size_t lc_proc = nn_p.getNearProcessor(p_id);
...@@ -363,7 +383,7 @@ protected:
size_t s = link_ebx_ibx(nn_p,p_id,i);
// calculate the id of the internal box
sb.id = ebx_ibx_form(r_sub.get(k),s,v_cl.getProcessUnitID(),nn_p_box_pos.get(k),nn_p.getSentSubdomains(p_idp).size(),v_cl,false);
Box_dom<dim,T> & pr_box_int = proc_int_box.get(nn_p.ProctoID(p_id));
pr_box_int.ibx.add(sb);
...
...@@ -74,21 +74,23 @@ class nn_prcs
/*! \brief add sub-domains to processor for a near processor i
*
* \param i near processor
* \param r_sub real sub-domain id
* \param bx Box to add
* \param c sector the sub-domain comes from
*
*/
inline void add_nn_subdomain(size_t i, size_t r_sub, const Box<dim,T> & bx, const comb<dim> & c)
{
N_box<dim,T> & nnpst = nn_processor_subdomains_tmp[i];
nnpst.bx.add(bx);
nnpst.pos.add(c);
nnpst.r_sub.add(r_sub);
}
/*! \brief In case of periodic boundary conditions we replicate the sub-domains at the border
*
* \param domain Domain
* \param boundary boundary conditions
* \param ghost ghost part
*
...@@ -150,7 +152,7 @@ class nn_prcs
if (sub.Intersect(bp,b_int) == true)
{
sub += shift;
add_nn_subdomain(IDtoProc(k),l,sub,cmbs[j]);
}
}
}
...@@ -177,6 +179,7 @@ class nn_prcs
nnps.bx.add(nnps_tmp.bx.get(i));
nnps.pos.add(nnps_tmp.pos.get(i));
nnps.r_sub.add(nnps_tmp.r_sub.get(i));
}
}
...@@ -348,6 +351,8 @@ public:
N_box<dim,T> & nnps = nn_processor_subdomains[it->first];
nnps.pos.add(c);
nnps.r_sub.add(i);
nnps.n_real_sub = nnps.bx.size();
}
}
}
...@@ -374,7 +379,27 @@ public:
return nn_processors.get(id);
}
/*! \brief Get the real-id of the sub-domains of a near processor
*
* \param p_id near processor rank
*
* \return the sub-domains real id
*
*/
inline const openfpm::vector< size_t > & getNearSubdomainsRealId(size_t p_id) const
{
auto key = nn_processor_subdomains.find(p_id);
#ifdef SE_CLASS1
if (key == nn_processor_subdomains.end())
{
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " error this process rank is not adjacent to the local processor";
}
#endif
return key->second.r_sub;
}
/*! \brief Get the sub-domains of a near processor
*
* \param p_id near processor rank
*
...@@ -394,11 +419,33 @@ public:
return key->second.bx;
}
/*! \brief Get the number of real sub-domains of a near processor
*
* \note the real sub-domains are the sub-domains in the central sector, i.e. any sub-domain that has not been created because of boundary conditions
*
* \param p_id near processor rank
*
* \return the number of real sub-domains
*
*/
inline size_t getNRealSubdomains(size_t p_id) const
{
auto key = nn_processor_subdomains.find(p_id);
#ifdef SE_CLASS1
if (key == nn_processor_subdomains.end())
{
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " error this process rank is not adjacent to the local processor";
}
#endif
return key->second.n_real_sub;
}
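A hedged usage sketch of the two queries above together with getNearSubdomains, not part of the commit: nn_p and p_id stand for an nn_prcs instance and a neighbouring rank, dim and T are the usual template parameters, and it is assumed, as the code above suggests, that the real sub-domains are stored first with the periodic replicas appended afterwards.

const openfpm::vector< ::Box<dim,T> > & subs = nn_p.getNearSubdomains(p_id);
const openfpm::vector< size_t > & rid = nn_p.getNearSubdomainsRealId(p_id);
size_t n_real = nn_p.getNRealSubdomains(p_id);

for (size_t s = 0 ; s < subs.size() ; s++)
{
	// indexes >= n_real are replicas created by the periodic boundary conditions;
	// rid.get(s) points back to the real sub-domain they were created from
	if (s >= n_real)
		std::cout << "replica of real sub-domain " << rid.get(s) << "\n";
}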
/*! \brief Get the sub-domains sector position of a near processor
*
* \param p_id near processor rank
*
* \return the sub-domains positions
*
*/
inline const openfpm::vector< comb<dim> > & getNearSubdomainsPos(size_t p_id) const
...@@ -413,7 +460,7 @@ public:
return key->second.pos;
}
/*! \brief Get the near processor id
*
* \param p_id adjacent processor rank
*
...
...@@ -21,7 +21,6 @@
#define GRID_SUB_UNIT_FACTOR 64
/*! \brief This is a distributed grid
*
* Implementation of a distributed grid the decomposition is geometrical, grid
...@@ -140,7 +139,7 @@ class grid_dist_id
// Get the internal ghost boxes and transform into grid units
::Box<dim,St> ib_dom = dec.getProcessorIGhostBox(i,j);
ib_dom -= cd_sm.getOrig();
::Box<dim,long int> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom,dec.periodicity());
// Check if ib is valid; if not it means that the internal ghost does not contain information, so skip it
if (ib.isValid() == false)
...@@ -182,7 +181,7 @@ class grid_dist_id
// Get the external ghost boxes and transform into grid units
::Box<dim,St> ib_dom = dec.getProcessorEGhostBox(i,j);
ib_dom -= cd_sm.getOrig();
::Box<dim,long int> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom,dec.periodicity());
// Check if ib is valid; if not it means that the internal ghost does not contain information, so skip it
if (ib.isValid() == false)
...@@ -238,7 +237,7 @@ class grid_dist_id
// Get the internal ghost boxes and transform into grid units
::Box<dim,St> ib_dom = dec.getLocalIGhostBox(i,j);
ib_dom -= cd_sm.getOrig();
::Box<dim,long int> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom,dec.periodicity());
// Check if ib is valid; if not it means that the internal ghost does not contain information, so skip it
if (ib.isValid() == false)
...@@ -277,7 +276,7 @@ class grid_dist_id
// Get the internal ghost boxes and transform into grid units
::Box<dim,St> ib_dom = dec.getLocalEGhostBox(i,j);
ib_dom -= cd_sm.getOrig();
::Box<dim,long int> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom,dec.periodicity());
// Warning: even if ib is not valid in grid units we are forced to keep it,
// otherwise the value returned from dec.getLocalEGhostSub(i,j) will point to an
...@@ -455,14 +454,20 @@ class grid_dist_id
*
*
*/
inline void InitializeCellDecomposer(const size_t (& g_sz)[dim], const size_t (& bc)[dim])
{
// check that the grid has valid size
check_size(g_sz);
// For a 5x5 grid you have 4x4 cells (except along periodic directions)
size_t c_g[dim];
for (size_t i = 0 ; i < dim ; i++)
{
if (bc[i] == NON_PERIODIC)
c_g[i] = (g_sz[i]-1 > 0)?(g_sz[i]-1):1;
else
c_g[i] = g_sz[i];
}
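// Illustrative note (values invented here, not part of the commit):
// g_sz = {5,5} with bc = {PERIODIC, NON_PERIODIC} gives c_g = {5,4};
// a periodic direction keeps one cell per grid point, while a
// non-periodic direction has g_sz-1 cells (5 points define 4 intervals)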
// Initialize the cell decomposer
cd_sm.setDimensions(domain,c_g,0);
...@@ -473,7 +478,7 @@
* \param g_sz Global size of the grid
*
*/
inline void InitializeDecomposition(const size_t (& g_sz)[dim], const size_t (& bc)[dim])
{
// fill the global size of the grid
for (size_t i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
...@@ -489,11 +494,6 @@
for (size_t i = 0 ; i < dim ; i++)
{div[i] = openfpm::math::round_big_2(pow(n_sub,1.0/dim));}
// Create the sub-domains
dec.setParameters(div,domain,bc,ghost);
dec.decompose();
...@@ -636,8 +636,6 @@ public:
check_new(this,8,GRID_DIST_EVENT,4);
#endif
size_t ext_dim[dim];
for (size_t i = 0 ; i < dim ; i++) {ext_dim[i] = g.getGridInfoVoid().size(i) + ext.getKP1().get(i) + ext.getKP2().get(i);}
...@@ -675,10 +673,11 @@ public:
grid_dist_id(const Decomposition & dec, const size_t (& g_sz)[dim], const Box<dim,St> & domain, const Ghost<dim,St> & ghost)
:domain(domain),ghost(ghost),dec(dec),v_cl(create_vcluster()),ginfo(g_sz),ginfo_v(g_sz)
{
#ifdef SE_CLASS2
check_new(this,8,GRID_DIST_EVENT,4);
#endif
InitializeCellDecomposer(g_sz,create_non_periodic<dim>().bc);
InitializeStructures(g_sz);
}
...@@ -761,20 +760,49 @@ public:
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g)
:grid_dist_id(g_sz,domain,g,create_non_periodic<dim>())
{
}
/*! It constructs a grid of a specified size, defined on a specified Box space, with a ghost size given in grid units
*
* \param g_sz grid size on each dimension
* \param domain Box that contains the grid
* \param g Ghost part of the domain (given in grid units)
*
* \warning In very rare cases the ghost part can be one point bigger than the one specified
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,long int> & g)
:grid_dist_id(g_sz,domain,g,create_non_periodic<dim>())
{
}
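As a usage sketch of the periodic construction path that this commit introduces (not part of the commit itself): the sizes, the property type scalar<float> and the CartDecomposition template arguments below are assumptions modelled on the project's examples, and periodicity<dim> is assumed to expose the bc array used with create_non_periodic above.

// Hedged sketch: a 2D distributed grid periodic along x and non-periodic along y
size_t sz[2] = {128,128};
Box<2,float> domain({0.0,0.0},{1.0,1.0});
Ghost<2,float> g(0.05);

periodicity<2> p;
p.bc[0] = PERIODIC;
p.bc[1] = NON_PERIODIC;

grid_dist_id<2,float,scalar<float>,CartDecomposition<2,float>> g_dist(sz,domain,g,p);
g_dist.write("grid_periodic");   // the VTK writer is now the default (see the ChangeLog above)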
/*! It constructs a grid of a specified size, defined on a specified Box space, with a specified ghost size and periodicity
*
* \param g_sz grid size on each dimension
* \param domain Box that contains the grid
* \param g Ghost part of the domain
* \param p Boundary conditions (periodicity)
*
* \warning In very rare cases the ghost part can be one point bigger than the one specified
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g, const periodicity<dim> & p)
:domain(domain),ghost(g),dec(create_vcluster()),v_cl(create_vcluster()),ginfo(g_sz),ginfo_v(g_sz)
{
#ifdef SE_CLASS2
check_new(this,8,GRID_DIST_EVENT,4);
#endif
// Increment the reference counter of the decomposition
this->dec.incRef();
InitializeCellDecomposer(g_sz);