/*
 * CartDecomposition.hpp
 *
 * Created on: Oct 07, 2015
 * Author: Pietro Incardona, Antonio Leo
 */
#ifndef CARTDECOMPOSITION_HPP
#define CARTDECOMPOSITION_HPP
#include "config.h"
#include "Graph/CartesianGraphFactory.hpp"
#include <vector>
#include <initializer_list>
#include "SubdomainGraphNodes.hpp"
#include "dec_optimizer.hpp"
#include "Space/Shape/Box.hpp"
#include <unordered_map>
#include "NN/CellList/CellList.hpp"
#include "common.hpp"
#include "ie_loc_ghost.hpp"
#include "ie_ghost.hpp"
#include "nn_processor.hpp"
#include "GraphMLWriter/GraphMLWriter.hpp"
#include "Distribution/ParMetisDistribution.hpp"
#include "Distribution/DistParMetisDistribution.hpp"
#include "Distribution/MetisDistribution.hpp"
#include "DLB/DLB.hpp"
#include "util/mathutil.hpp"
#include "data_type/aggregate.hpp"
#include "Domain_NN_calculator_cart.hpp"
#define CARTDEC_ERROR 2000lu
/*! \brief This class decomposes a space into sub-sub-domains and distributes them across processors
*
* \tparam dim is the dimensionality of the physical domain we are going to decompose.
* \tparam T type of the space we decompose, Real, Integer, Complex ...
* \tparam Memory Memory factory used to allocate memory
* \tparam Distribution type of distribution, can be ParMetisDistribution or MetisDistribution
* Given an N-dimensional space, this class decomposes the space into a Cartesian grid of small
* sub-sub-domains. Each sub-sub-domain carries an id that identifies the processor it is
* assigned to (in general the union of all the sub-sub-domains assigned to a processor is a
* simply connected space). A second step merges the sub-sub-domains with the same id into bigger regions
* called sub-domains. Each sub-domain has an extended space called the ghost part.
*
* Assuming that VCluster.getProcessUnitID(), equivalent to the MPI processor rank, returns the local
* processor id, we define:
*
* * local processor: processor rank
* * local sub-domain: sub-domain assigned to the local processor
* * external ghost box: (or ghost box) the boxes that compose the ghost space of the processor, i.e. the
* boxes produced by expanding every local sub-domain by the ghost extension and intersecting it with the sub-domains
* of the other processors
* * near processors: the processors adjacent to the local processor, where adjacent means all the processors
* that have a non-zero intersection with the ghost part of the local processor, or equivalently all the processors that
* produce non-zero external boxes with the local processor, or all the processors that must communicate
* in case of ghost data synchronization
* * internal ghost box: the part of the ghost of a near processor that intersects the space of the local
* processor, i.e. the boxes produced by expanding the sub-domains of the near processors and intersecting them with the local sub-domains
* * near processor sub-domain: a sub-domain that lives on a near (or contiguous) processor
* * near processor list: the list of all the near processors of the local processor (each processor has a list
* of its near processors)
* * local ghosts, internal or external: all the ghosts that do not involve inter-processor communication
*
* \see calculateGhostBoxes() for a visualization of internal and external ghost boxes
* ### Create a Cartesian decomposition object on a Box space, distribute, calculate internal and external ghost boxes
* \snippet CartDecomposition_unit_test.hpp Create CartDecomposition
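*
* A minimal usage sketch (illustrative: it assumes an already initialized Vcluster v_cl, the
* default Memory/Distribution template parameters, and the setParameters()/decompose() interface
* exercised by the snippet above; the factor of 64 sub-sub-domains per processor is arbitrary):
*
* \code
* // physical domain [0,1]^3, symmetric ghost margin and periodic boundary conditions
* Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});
* Ghost<3,float> g(0.01);
* size_t bc[3] = {PERIODIC,PERIODIC,PERIODIC};
*
* // number of sub-sub-domains on each dimension
* size_t div[3];
* for (size_t i = 0 ; i < 3 ; i++)
* div[i] = CartDecomposition<3,float>::getDefaultGrid(v_cl.getProcessingUnits() * 64);
*
* CartDecomposition<3,float> dec(v_cl);
* dec.setParameters(div,box,bc,g);
* dec.decompose();
* \endcode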
*
*/
template<unsigned int dim, typename T, typename Memory, typename Distribution>
class CartDecomposition: public ie_loc_ghost<dim, T>, public nn_prcs<dim, T>, public ie_ghost<dim, T>, public domain_nn_calculator_cart<dim>
{
public:
//! Type of the domain we are going to decompose
typedef T domain_type;
//! It simplifies access to the SpaceBox elements
typedef SpaceBox<dim, T> Box;
//! The base type of this class (itself)
typedef CartDecomposition<dim,T,Memory,Distribution> base_type;
//! This class admits a version defined on an extended domain
typedef CartDecomposition_ext<dim,T,Memory,Distribution> extended_type;
protected:
//! Indicate the communication weight has been set
bool commCostSet = false;
//! This is the key type to access data_s, for example in the case of vector
typedef typename openfpm::vector<SpaceBox<dim, T>,
Memory,
typename memory_traits_lin<SpaceBox<dim, T>>::type,
memory_traits_lin,
openfpm::vector_grow_policy_default,
openfpm::vect_isel<SpaceBox<dim, T>>::value>::access_key acc_key;
//! the set of all local sub-domains as a vector
openfpm::vector<SpaceBox<dim, T>> sub_domains;
//! for each sub-domain, contains the list of the neighboring processors
openfpm::vector<openfpm::vector<long unsigned int> > box_nn_processor;
//! Structure that contain for each sub-sub-domain box the processor id
openfpm::vector<size_t> fine_s;
//! Structure that store the cartesian grid information
grid_sm<dim, void> gr;
//! Structure that store the distribution grid information
grid_sm<dim, void> gr_dist;
//! Structure that decompose your structure into cell without creating them
//! useful to convert positions to CellId or sub-domain id in this case
CellDecomposer_sm<dim, T, shift<dim,T>> cd;
//! Magnification factor between distribution and
//! decomposition
size_t magn[dim];
//! rectangular domain to decompose
::Box<dim,T> domain;
//! Box spacing on each dimension
T spacing[dim];
//! Processor bounding box (smallest box enclosing all local sub-domains)
::Box<dim,T> bbox;
//! Boundary condition info
size_t bc[dim];
//! Runtime virtual cluster machine
Vcluster & v_cl;
//! Distribution object used to assign sub-sub-domains to processors
Distribution dist;
//! reference counter of the object, in case it is shared between objects
long int ref_cnt;
//! ghost information
Ghost<dim,T> ghost;
//! Processor domain bounding box
::Box<dim,size_t> proc_box;
//! set of Boxes produced by the decomposition optimizer
openfpm::vector<::Box<dim, size_t>> loc_box;
/*! \brief It convert the box from the domain decomposition into a sub-domain
*
* The decomposition box from the domain-decomposition contains the box in integer
* coordinates. This box is converted into a continuous box. It also adjusts loc_box
* if the distribution grid and the decomposition grid are different.
*
* \param loc_box box (in decomposition-grid coordinates) to convert
*
* \return the corresponding sub-domain
*
*/
template<typename Memory_bx> SpaceBox<dim,T> convertDecBoxIntoSubDomain(encapc<1,::Box<dim,size_t>,Memory_bx> loc_box)
{
// A point with all coordinate to one
size_t one[dim];
for (size_t i = 0 ; i < dim ; i++) {one[i] = 1;}
SpaceBox<dim, size_t> sub_dc = loc_box;
SpaceBox<dim, size_t> sub_dce = sub_dc;
sub_dce.expand(one);
sub_dce.mul(magn);
// shrink by one
for (size_t i = 0 ; i < dim ; i++)
{
loc_box.template get<Box::p1>()[i] = sub_dce.getLow(i);
loc_box.template get<Box::p2>()[i] = sub_dce.getHigh(i) - 1;
}
SpaceBox<dim, T> sub_d(sub_dce);
sub_d.mul(spacing);
sub_d += domain.getP1();
// Fixing sub-domains to cover the full domain:
// if loc_box is at the boundary we have to ensure that the box spans the full
// domain (avoiding round-off errors)
for (size_t i = 0; i < dim; i++)
{
if (sub_dc.getHigh(i) == gr.size(i) - 1)
sub_d.setHigh(i, domain.getHigh(i));
if (sub_dc.getLow(i) == 0)
sub_d.setLow(i,domain.getLow(i));
}
return sub_d;
}
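/* Worked example of the conversion above (illustrative, assuming Box::expand() enlarges the
* high corner): with dim = 1, domain = [0,1], gr.size(0) = 8 (so spacing[0] = 0.125) and
* magn[0] = 2, a decomposition box loc_box = [1,1] in distribution-grid coordinates becomes
* sub_dce = [1,2] after expand(one), then [2,4] after mul(magn); loc_box is written back as
* [2,3] (decomposition-grid cells) and sub_d = [0.25,0.5] after mul(spacing) and the
* domain.getP1() shift. Distribution cell 1 of 4 therefore maps to decomposition cells 2-3
* and to the continuous box [0.25,0.5].
*/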
/*! \brief It decompose and distribute the sub-domains across the processors
*
* \param v_cl Virtual cluster, used internally for communications
* \param bc boundary conditions
* \param opt option (one option is to construct)
*
*/
void createSubdomains(Vcluster & v_cl, const size_t (& bc)[dim], size_t opt = 0)
{
int p_id = v_cl.getProcessUnitID();
// Calculate the total number of boxes and the spacing
// on each direction
// Get the box containing the domain
SpaceBox<dim, T> bs = domain.getBox();
for (unsigned int i = 0; i < dim; i++)
spacing[i] = (bs.getHigh(i) - bs.getLow(i)) / gr.size(i);
// fill the structure that store the processor id for each sub-sub-domain
fine_s.resize(gr.size());
// Optimize the decomposition creating bigger spaces
// And reducing Ghost over-stress
dec_optimizer<dim, Graph_CSR<nm_v, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
// Ghost
Ghost<dim,long int> ghe;
// Set the ghost
for (size_t i = 0 ; i < dim ; i++)
{
ghe.setLow(i,static_cast<long int>(ghost.getLow(i)/spacing[i]) - 1);
ghe.setHigh(i,static_cast<long int>(ghost.getHigh(i)/spacing[i]) + 1);
}
d_o.template optimize<nm_v::sub_id, nm_v::proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
bbox = convertDecBoxIntoSubDomain(loc_box.get(0));
sub_domains.add(bbox);
for (size_t s = 1; s < loc_box.size(); s++)
{
SpaceBox<dim,T> sub_d = convertDecBoxIntoSubDomain(loc_box.get(s));
// add the sub-domain
sub_domains.add(sub_d);
// Calculate the bound box
bbox.enclose(sub_d);
}
nn_prcs<dim,T>::create(box_nn_processor, sub_domains);
// the fine_s structure contains the processor id for each sub-sub-domain
// with sub-sub-domain we mean the sub-domain decomposition before
// running dec_optimizer (before merging sub-domains)
grid_key_dx_iterator<dim> git(gr);
while (git.isNext())
{
auto key = git.get();
grid_key_dx<dim> key2;
for (size_t i = 0 ; i < dim ; i++)
key2.set_d(i,key.get(i) / magn[i]);
size_t lin = gr_dist.LinId(key2);
size_t lin2 = gr.LinId(key);
fine_s.get(lin2) = dist.getGraph().template vertex_p<nm_v::proc_id>(lin);
++git;
}
Initialize_geo_cell_lists();
}
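/* Worked example of the fine_s mapping above (illustrative): with gr.size(0) = 8,
* gr_dist.size(0) = 4 and therefore magn[0] = 2, the decomposition cells 6 and 7 both give
* key / magn = 3, so fine_s stores for both of them the processor id found at
* distribution-graph vertex 3.
*/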
/*! \brief Initialize geo_cell lists
*
*/
void Initialize_geo_cell_lists()
{
// Get the processor bounding Box
::Box<dim,T> bound = getProcessorBounds();
// Not necessary, but I prefer to enlarge it by the ghost
bound.enlarge(ghost);
// calculate the geo cell list sub-divisions
size_t div[dim];
for (size_t i = 0; i < dim; i++)
div[i] = (size_t) ((bound.getHigh(i) - bound.getLow(i)) / cd.getCellBox().getP2()[i]);
// Initialize the geo_cell structure
ie_ghost<dim,T>::Initialize_geo_cell(bound,div);
// Initialize shift vectors
ie_ghost<dim,T>::generateShiftVectors(domain);
}
/*! \brief Calculate communication and migration costs
*
* \param ts how many timesteps have passed since last calculation, used to approximate the cost
*/
void computeCommunicationAndMigrationCosts(size_t ts)
{
float migration = 0;
SpaceBox<dim, T> cellBox = cd.getCellBox();
float b_s = static_cast<float>(cellBox.getHigh(0));
float gh_s = static_cast<float>(ghost.getHigh(0));
// compute the ghost area for the 2-dimensional case
float gh_v = (gh_s * b_s);
// multiply by the sub-sub-domain side for each additional dimension
for (size_t i = 2; i < dim; i++)
gh_v *= b_s;
size_t norm = (size_t) (1.0 / gh_v);
migration = pow(b_s, dim);
size_t prev = 0;
for (size_t i = 0; i < dist.getNSubSubDomains(); i++)
{
dist.setMigrationCost(i, norm * migration /* * dist.getSubSubDomainComputationCost(i)*/ );
for (size_t s = 0; s < dist.getNSubSubDomainNeighbors(i); s++)
{
// We have to remove dist.getSubSubDomainComputationCost(i) otherwise the graph is
// not directed
dist.setCommunicationCost(i, s, 1 /** dist.getSubSubDomainComputationCost(i)*/ * ts);
}
prev += dist.getNSubSubDomainNeighbors(i);
}
}
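/* Illustrative numbers for the cost model above (assuming dim = 3, a sub-sub-domain side
* b_s = 0.1 and a ghost extension gh_s = 0.01): gh_v = 0.01 * 0.1 * 0.1 = 1e-4, so
* norm = 1/gh_v = 10000 and migration = b_s^3 = 1e-3; every sub-sub-domain then gets a
* migration cost of norm * migration = 10, while every neighbor link gets a communication
* cost proportional to the elapsed timesteps ts.
*/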
/*! \brief Create the sub-domains that decompose your domain
*
*/
void CreateSubspaces()
{
// Create a grid where each point is a space
grid_sm<dim, void> g(div);
// create a grid_key_dx iterator
grid_key_dx_iterator<dim> gk_it(g);
// Divide the space into subspaces
while (gk_it.isNext())
{
//! iterate through all subspaces
grid_key_dx<dim> key = gk_it.get();
//! Create a new subspace
SpaceBox<dim, T> tmp;
//! fill with the margin of the box
for (int i = 0; i < dim; i++)
{
tmp.setHigh(i, (key.get(i) + 1) * spacing[i]);
tmp.setLow(i, key.get(i) * spacing[i]);
}
//! add the space box
sub_domains.add(tmp);
// next sub-domain
++gk_it;
}
}
/*! \brief It calculates the internal ghost boxes
*
* Example: Processor 10 calculates
* B8_0, B9_0, B9_1 and B5_0
*
\verbatim
+----------------------------------------------------+
| |
| Processor 8 |
| Sub+domain 0 +-----------------------------------+
| | |
| | |
++--------------+---+---------------------------+----+ Processor 9 |
| | | B8_0 | | Subdomain 0 |
| +------------------------------------+ |
| | | | | |
| | | |B9_0| |
| | B | Local processor | | |
| Processor 5 | 5 | Subdomain 0 | | |
| Subdomain 0 | _ | +----------------------------------------+
| | 0 | | | |
| | | | | |
| | | | | Processor 9 |
| | | |B9_1| Subdomain 1 |
| | | | | |
| | | | | |
| | | | | |
+--------------+---+---------------------------+----+ |
| |
+-----------------------------------+
\endverbatim
and also
G8_0 G9_0 G9_1 G5_0 (External ghost boxes)
\verbatim
+----------------------------------------------------+
| Processor 8 |
| Subdomain 0 +-----------------------------------+
| | |
| +---------------------------------------------+ |
| | G8_0 | | |
+-----+---------------+------------------------------------+ | Processor 9 |
| | | | | Subdomain 0 |
| | | |G9_0| |
| | | | | |
| | | | | |
| | | Local processor | | |
| Processor 5 | | Sub+domain 0 | | |
| Subdomain 0 | | +-----------------------------------+
| | | | | |
| | G | | | |
| | 5 | | | Processor 9 |
| | | | | | Subdomain 1 |
| | 0 | |G9_1| |
| | | | | |
| | | | | |
+---------------------+------------------------------------+ | |
| | | |
+----------------------------------------+----+------------------------------+
\endverbatim
*
* The ghost margins for each dimension (p1 negative part, p2 positive part) are the ones
* stored in the member ghost:
*
\verbatim
^ p2[1]
|
|
+----+----+
| |
| |
p1[0]<-----+ +----> p2[0]
| |
| |
+----+----+
|
v p1[1]
\endverbatim
*
*
*/
void calculateGhostBoxes()
{
// Intersect all the local sub-domains with the sub-domains of the contiguous processors
// create the internal structures that store ghost information
ie_ghost<dim, T>::create_box_nn_processor_ext(v_cl, ghost, sub_domains, box_nn_processor, *this);
ie_ghost<dim, T>::create_box_nn_processor_int(v_cl, ghost, sub_domains, box_nn_processor, *this);
ie_loc_ghost<dim,T>::create(sub_domains,domain,ghost,bc);
}
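/* Illustration of the ghost margin sketched above (hedged): a scalar construction such as
* Ghost<3,float> g(0.05) gives a symmetric margin of 0.05 on the negative (p1) and positive (p2)
* side of every dimension; calculateGhostBoxes() then intersects the sub-domains enlarged by this
* margin with the sub-domains of the near processors to build the internal and external ghost boxes.
*/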
public:
//! Space dimensions
static constexpr int dims = dim;
//! Increment the reference counter
void incRef()
{ref_cnt++;}
//! Decrement the reference counter
void decRef()
{ref_cnt--;}
//! Return the reference counter
long int ref()
{
return ref_cnt;
}
/*! \brief Cartesian decomposition constructor
*
* \param v_cl Virtual cluster, used internally to handle or pipeline communication
*
*/
CartDecomposition(Vcluster & v_cl)
:nn_prcs<dim, T>(v_cl), v_cl(v_cl), dist(v_cl),ref_cnt(0)
{
// Reset the box to zero
bbox.zero();
}
/*! \brief Cartesian decomposition copy constructor
*
* \param cart object to copy
*
*/
CartDecomposition(const CartDecomposition<dim,T,Memory> & cart)
:nn_prcs<dim,T>(cart.v_cl),v_cl(cart.v_cl),dist(v_cl),ref_cnt(0)
{
this->operator=(cart);
}
/*! \brief Cartesian decomposition move constructor
*
* \param cart object to copy
*
*/
CartDecomposition(CartDecomposition<dim,T,Memory> && cart)
:nn_prcs<dim,T>(cart.v_cl),v_cl(cart.v_cl),dist(v_cl),ref_cnt(0)
{
this->operator=(cart);
}
//! Cartesian decomposition destructor
~CartDecomposition()
{
}
/*! \brief class to select the returned id by ghost_processorID
*
*/
class box_id
{
public:
/*! \brief Return the box id
*
* \param p structure containing the id information
* \param b_id box_id
*
* \return box id
*
*/
inline static size_t id(p_box<dim, T> & p, size_t b_id)
{
return b_id;
}
};
/*! \brief class to select the returned id by ghost_processorID
*
*/
class processor_id
{
public:
/*! \brief Return the processor id
*
* \param p structure containing the id information
* \param b_id box_id
*
* \return processor id
*
*/
inline static size_t id(p_box<dim, T> & p, size_t b_id)
{
return p.proc;
}
};
/*! \brief class to select the returned id by ghost_processorID
*
*/
class lc_processor_id
{
public:
/*! \brief Return the near processor id
*
* \param p structure containing the id information
* \param b_id box_id
*
* \return local processor id
*
*/
inline static size_t id(p_box<dim, T> & p, size_t b_id)
{
return p.lc_proc;
}
};
/*! \brief class to select the returned id by ghost_processorID
*
*/
class shift_id
{
public:
/*! \brief Return the shift id
*
* \param p structure containing the id information
* \param b_id box_id
*
* \return shift_id id
*
*/
inline static size_t id(p_box<dim,T> & p, size_t b_id)
{
return p.shift_id;
}
};
/*! \brief Apply boundary condition to the point
*
* If the particle goes out on the right side of the domain, it is brought back on the left side
* in the periodic case; nothing happens in the non-periodic case
*
* \param pt Point to apply the boundary condition to (its coordinates are changed according to
* the explanation above)
*
*/
void applyPointBC(float (& pt)[dim]) const
{
for (size_t i = 0 ; i < dim ; i++)
{
if (bc[i] == PERIODIC)
pt[i] = openfpm::math::periodic_l(pt[i],domain.getHigh(i),domain.getLow(i));
}
}
/*! \brief Apply boundary condition to the point
*
* If the particle goes out on the right side of the domain, it is brought back on the left side
* in the periodic case; nothing happens in the non-periodic case
*
* \param pt Point to apply the boundary conditions to (its coordinates are changed according to
* the explanation above)
*
*/
void applyPointBC(Point<dim,T> & pt) const
{
for (size_t i = 0 ; i < dim ; i++)
{
if (bc[i] == PERIODIC)
pt.get(i) = openfpm::math::periodic_l(pt.get(i),domain.getHigh(i),domain.getLow(i));
}
}
/*! \brief Apply boundary condition to the point
*
* If the particle goes out on the right side of the domain, it is brought back on the left side
* in the periodic case; nothing happens in the non-periodic case
*
* \param pt encapsulated point object (its coordinates are changed according to
* the explanation above)
*
*/
template<typename Mem> void applyPointBC(encapc<1,Point<dim,T>,Mem> && pt) const
{
for (size_t i = 0 ; i < dim ; i++)
{
if (bc[i] == PERIODIC)
pt.template get<0>()[i] = openfpm::math::periodic_l(pt.template get<0>()[i],domain.getHigh(i),domain.getLow(i));
}
}
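/* Illustration of the periodic wrapping above (hedged, assuming a [0,1) domain periodic on
* dimension 0): a point with pt[0] = 1.2 is mapped by applyPointBC() to pt[0] = 0.2, and
* symmetrically a point with pt[0] = -0.1 is mapped to pt[0] = 0.9; non-periodic dimensions
* are left untouched.
*/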
/*! \brief It create another object that contain the same decomposition information but with different ghost boxes
*
* \param g ghost
*
* \return a duplicated decomposition with different ghost boxes
*
*/
CartDecomposition<dim,T,Memory> duplicate(const Ghost<dim,T> & g) const
{
CartDecomposition<dim,T,Memory> cart(v_cl);
cart.box_nn_processor = box_nn_processor;
cart.sub_domains = sub_domains;
cart.fine_s = fine_s;
cart.gr = gr;
cart.cd = cd;
cart.domain = domain;
std::copy(spacing,spacing+dim,cart.spacing);
cart.bbox = bbox;
cart.ghost = g;
for (size_t i = 0 ; i < dim ; i++)
cart.bc[i] = bc[i];
(static_cast<nn_prcs<dim,T> &>(cart)).create(box_nn_processor, sub_domains);
(static_cast<nn_prcs<dim,T> &>(cart)).applyBC(domain,ghost,bc);
cart.Initialize_geo_cell_lists();
cart.calculateGhostBoxes();
return cart;
}
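/* Hedged usage sketch: given an already decomposed object dec, a copy with a wider ghost can
* be obtained without re-running the decomposition (the sub-domains are shared as-is):
*
* Ghost<3,float> g2(0.1);
* auto dec2 = dec.duplicate(g2);
*/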
/*! \brief It creates another object that contains the same information and acts in the same way
*
* \return a duplicated decomposition
*
*/
CartDecomposition<dim,T,Memory> duplicate() const
{
CartDecomposition<dim,T,Memory> cart(v_cl);
(static_cast<ie_loc_ghost<dim,T>*>(&cart))->operator=(static_cast<ie_loc_ghost<dim,T>>(*this));
(static_cast<nn_prcs<dim,T>*>(&cart))->operator=(static_cast<nn_prcs<dim,T>>(*this));
(static_cast<ie_ghost<dim,T>*>(&cart))->operator=(static_cast<ie_ghost<dim,T>>(*this));
cart.sub_domains = sub_domains;
cart.box_nn_processor = box_nn_processor;
cart.fine_s = fine_s;
cart.gr = gr;
cart.cd = cd;
cart.domain = domain;
std::copy(spacing,spacing+dim,cart.spacing);
cart.ghost = ghost;
for (size_t i = 0 ; i < dim ; i++)
cart.bc[i] = this->bc[i];
return cart;
}
/*! \brief Copy the element
*
* \param cart element to copy
*
* \return itself
*
*/
CartDecomposition<dim,T,Memory> & operator=(const CartDecomposition & cart)
{
static_cast<ie_loc_ghost<dim,T>*>(this)->operator=(static_cast<ie_loc_ghost<dim,T>>(cart));
static_cast<nn_prcs<dim,T>*>(this)->operator=(static_cast<nn_prcs<dim,T>>(cart));
static_cast<ie_ghost<dim,T>*>(this)->operator=(static_cast<ie_ghost<dim,T>>(cart));
sub_domains = cart.sub_domains;
box_nn_processor = cart.box_nn_processor;
fine_s = cart.fine_s;
gr = cart.gr;
cd = cart.cd;
domain = cart.domain;
std::copy(cart.spacing,cart.spacing+dim,spacing);
ghost = cart.ghost;
for (size_t i = 0 ; i < dim ; i++)
bc[i] = cart.bc[i];
return *this;
}
/*! \brief Copy the element, move semantic
*
* \param cart element to copy
*
* \return itself
*
*/
CartDecomposition<dim,T,Memory> & operator=(CartDecomposition && cart)
{
static_cast<ie_loc_ghost<dim,T>*>(this)->operator=(static_cast<ie_loc_ghost<dim,T>>(cart));
static_cast<nn_prcs<dim,T>*>(this)->operator=(static_cast<nn_prcs<dim,T>>(cart));
static_cast<ie_ghost<dim,T>*>(this)->operator=(static_cast<ie_ghost<dim,T>>(cart));
sub_domains.swap(cart.sub_domains);
box_nn_processor.swap(cart.box_nn_processor);
fine_s.swap(cart.fine_s);
gr = cart.gr;
cd = cart.cd;
domain = cart.domain;
std::copy(cart.spacing,cart.spacing+dim,spacing);
ghost = cart.ghost;
for (size_t i = 0 ; i < dim ; i++)
bc[i] = cart.bc[i];
return *this;
}
/*! \brief The default grid size
*
* The default grid is always an isotropic grid that adapts to the number of processors;
* it defines into how many cells the space is divided along each dimension for a required
* minimum number of sub-domains
*
* \param n_sub number of sub-domains per processor
*
* \return grid dimension (it is one number because it is the same on every dimension)
*
*/
static size_t getDefaultGrid(size_t n_sub)
{
// Calculate the number of sub-sub-domain on
// each dimension
return openfpm::math::round_big_2(pow(n_sub, 1.0 / dim));
}
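/* Worked example (illustrative, assuming round_big_2() rounds up to the next power of two):
* with dim = 3, requesting n_sub = 64 sub-domains gives pow(64,1.0/3) = 4, so each dimension
* is divided into 4 cells (a 4 x 4 x 4 grid); requesting n_sub = 512 gives an 8 x 8 x 8 grid.
*/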
/*! \brief Given a point return in which processor the particle should go
*
* \param p point
*
* \return processorID
*
*/
template<typename Mem> size_t inline processorID(const encapc<1, Point<dim,T>, Mem> & p) const
{
return fine_s.get(cd.template getCell(p));
}
/*! \brief Given a point return in which processor the particle should go
*
* \param p point
*
* \return processorID
*
*/
size_t inline processorID(const Point<dim,T> &p) const
{
return fine_s.get(cd.getCell(p));
}
/*! \brief Given a point return in which processor the particle should go
*
* \param p point
*
* \return processorID
*
*/
size_t inline processorID(const T (&p)[dim]) const
{
return fine_s.get(cd.getCell(p));
}
/*! \brief Given a point return in which processor the point/particle should go
*
* Boundary conditions are considered
*
* \param p point
*
* \return processorID
*
*/
template<typename Mem> size_t inline processorIDBC(encapc<1, Point<dim,T>, Mem> p)
{
Point<dim,T> pt = p;
applyPointBC(pt);
return fine_s.get(cd.getCell(pt));
}
/*! \brief Given a point return in which processor the particle should go
*
* Boundary conditions are considered
*
* \param p point
*
* \return processorID
*
*/
template<typename ofb> size_t inline processorIDBC(const Point<dim,T> &p) const
{
Point<dim,T> pt = p;
applyPointBC(pt);
return fine_s.get(cd.getCell(pt));
}
/*! \brief Given a point return in which processor the particle should go
*
* Boundary conditions are considered
*
* \param p point
*
* \return processorID
*
*/
template<typename ofb> size_t inline processorIDBC(const T (&p)[dim]) const
{
Point<dim,T> pt = p;
applyPointBC(pt);
return fine_s.get(cd.getCell(pt));
}
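/* Hedged usage sketch (dec is an already decomposed object; the position is illustrative):
*
* Point<3,float> pos;
* pos.get(0) = 1.05; pos.get(1) = 0.5; pos.get(2) = 0.5; // outside a [0,1) domain, periodic on x
* dec.applyPointBC(pos); // wrapped back to x = 0.05
* size_t owner = dec.processorID(pos); // processor that owns the wrapped position
*/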
/*! \brief Get the periodicity on i dimension
*
* \param i dimension
*
* \return the periodicity in direction i
*
*/
inline size_t periodicity(size_t i)
{
return bc[i];
}
/*! \brief Get the periodicity
*
*
* \return the periodicity
*
*/
inline const size_t (& periodicity() const) [dim]
{
return bc;
}
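// Hedged usage sketch: querying the boundary conditions of an existing decomposition dec,
// e.g. if (dec.periodicity(0) == PERIODIC) the domain wraps around along dimension 0.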
/*! \brief Calculate magnification
*
* \param gm distribution grid
*
*/
void calculate_magn(const grid_sm<dim,void> & gm)
{
if (gm.size() == 0)
{
for (size_t i = 0 ; i < dim ; i++)
magn[i] = 1;
}
else
{
for (size_t i = 0 ; i < dim ; i++)
{
if (gr.size(i) % gm.size(i) != 0)
std::cerr << __FILE__ << ":" << __LINE__ << " Error: the decomposition grid specified as gr.size(" << i << ")=" << gr.size(i) << " is not a multiple of the distribution grid gm.size(" << i << ")=" << gm.size(i) << std::endl;