/*
 * ParMetisDistribution.hpp
 *
 *  Created on: Nov 19, 2015
 *      Author: Antonio Leo
 */

#ifndef SRC_DECOMPOSITION_PARMETISDISTRIBUTION_HPP_
#define SRC_DECOMPOSITION_PARMETISDISTRIBUTION_HPP_


#include "SubdomainGraphNodes.hpp"
#include "parmetis_util.hpp"
#include "Graph/ids.hpp"
#include "Graph/CartesianGraphFactory.hpp"

#define PARMETIS_DISTRIBUTION_ERROR 100002

/*! \brief Class that distributes sub-sub-domains across processors using the ParMetis library
 *
 * Given a graph, and after setting the computational cost (on the vertices), the communication
 * cost (on the edges) and the migration cost (or the total communication cost), it produces an
 * optimally balanced distribution
 *
 * In addition to Metis it provides the functionality to refine a previously computed
 * decomposition
 *
 * ### Initialize a Cartesian graph and decompose
 * \snippet Distribution_unit_tests.hpp Initialize a ParMetis Cartesian graph and decompose
 *
 * ### Refine the decomposition
 * \snippet Distribution_unit_tests.hpp refine with parmetis the decomposition
 *
 */
template<unsigned int dim, typename T>
class ParMetisDistribution
{
	//! Is distributed
	bool is_distributed = false;

	//! Vcluster
	Vcluster & v_cl;

	//! Structure that stores the Cartesian grid information
	grid_sm<dim, void> gr;

	//! rectangular domain to decompose
	Box<dim, T> domain;

	//! Global sub-sub-domain graph
	Graph_CSR<nm_v, nm_e> gp;

	//! Convert the graph to parmetis format
	Parmetis<Graph_CSR<nm_v, nm_e>> parmetis_graph;

	//! Ids of the sub-sub-domains owned by this processor (where we set the costs)
	openfpm::vector<size_t> sub_sub_owner;

	//! vtxdist array needed by ParMetis
	//
	// vtxdist is an array common to all processors: it indicates how
	// the vertices are distributed across the processors
	//
	// Example: we have 3 processors
	//
	// processor 0 has 3 vertices
	// processor 1 has 5 vertices
	// processor 2 has 4 vertices
	//
	// vtxdist contains 0,3,8,12
	//
	// these offsets also define the unique global id of the vertices
	//
	openfpm::vector<rid> vtxdist;

	//! partitions: one vector per processor, holding the partition array computed by that processor
	openfpm::vector<openfpm::vector<idx_t>> partitions;

	//! Data structure to keep track of the new vertex distribution across processors (needed to update the main graph)
	openfpm::vector<openfpm::vector<gid>> v_per_proc;

	//! Hashmap to access the global id given the re-mapped one
	std::unordered_map<rid, gid> m2g;

	//! Flag to check if weights are used on vertices
	bool verticesGotWeights = false;

	/*! \brief Update the main graph and the subgraph with the partition data received from the other processors
	 *
	 */
	void updateGraphs()
	{
		sub_sub_owner.clear();

		size_t Np = v_cl.getProcessingUnits();

		// Init n_vtxdist to gather information about the new decomposition
		openfpm::vector<rid> n_vtxdist(Np + 1);
		for (size_t i = 0; i <= Np; i++)
			n_vtxdist.get(i).id = 0;

		// Update the main graph with the data received from processor i
		for (size_t i = 0; i < Np; i++)
		{
			size_t ndata = partitions.get(i).size();
			size_t k = 0;

			// Update the main graph with the received information
			for (rid l = vtxdist.get(i); k < ndata && l < vtxdist.get(i + 1); k++, ++l)
			{
				// Create new n_vtxdist (just count the vertices assigned to each processor)
				++n_vtxdist.get(partitions.get(i).get(k) + 1);

				// vertex id from the re-mapped id to the global id
				auto v_id = m2g.find(l)->second.id;

				// Update proc id in the vertex (using the old map)
				gp.template vertex_p<nm_v::proc_id>(v_id) = partitions.get(i).get(k);

				if (partitions.get(i).get(k) == (long int)v_cl.getProcessUnitID())
					sub_sub_owner.add(v_id);

				// Add vertex to temporary structure of distribution (needed to update main graph)
				v_per_proc.get(partitions.get(i).get(k)).add(getVertexGlobalId(l));
			}
		}

		// Create new n_vtxdist (accumulate the counters)
		for (size_t i = 2; i <= Np; i++)
			n_vtxdist.get(i) += n_vtxdist.get(i - 1);

		// Copy the new decomposition in the main vtxdist
		for (size_t i = 0; i <= Np; i++)
			vtxdist.get(i) = n_vtxdist.get(i);

		// Renumber the vertices according to the new vtxdist and update the re-mapped to global id map
		openfpm::vector<size_t> cnt;
		cnt.resize(Np);

		for (size_t i = 0 ; i < gp.getNVertex(); ++i)
		{
			size_t pid = gp.template vertex_p<nm_v::proc_id>(i);

			rid j = rid(vtxdist.get(pid).id + cnt.get(pid));
			gid gi = gid(i);

			gp.template vertex_p<nm_v::id>(i) = j.id;
			cnt.get(pid)++;

			setMapId(j,gi);
		}
	}

	/*! \brief operator to access the vertex by re-mapped position
	 *
	 * \param id re-mapped id of the vertex to access
	 *
	 * \return a reference to the vertex
	 *
	 */
	inline auto vertexByMapId(rid id) -> decltype( gp.vertex(m2g.find(id)->second.id) )
	{
		return gp.vertex(m2g.find(id)->second.id);
	}

	/*! \brief operator to remap a vertex to a new position
	 *
	 * \param n re-mapped position
	 * \param g global position
	 *
	 */
	inline void setMapId(rid n, gid g)
	{
		m2g[n] = g;
	}

	/*! \brief Get the global id of the vertex given the re-mapped one
	 *
	 * \param n re-mapped id
	 * \return global id
	 *
	 */
	gid getVertexGlobalId(rid n)
	{
		return m2g.find(n)->second;
	}

	/*! \brief Initialize the re-mapped to global id map (identity mapping at the beginning)
	 *
	 */
	void initLocalToGlobalMap()
	{
		gid g;
		rid i;
		i.id = 0;

		m2g.clear();
		for ( ; (size_t)i.id < gp.getNVertex(); ++i)
		{
			g.id = i.id;

			m2g.insert( { i, g });
		}
	}

	/*! \brief Callback of the sendrecv to set the size of the array received
	 *
	 * \param msg_i Size of the message (in bytes)
	 * \param total_msg Total number of messages
	 * \param total_p Total number of processors to communicate with
	 * \param i Processor id
	 * \param ri Request id
	 * \param ptr Void pointer parameter for additional data to pass to the call-back
	 *
	 * \return the pointer where to store the incoming message from processor i
	 */
	static void * message_receive(size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void * ptr)
	{
		openfpm::vector < openfpm::vector < idx_t >> *v = static_cast<openfpm::vector<openfpm::vector<idx_t>> *>(ptr);

		v->get(i).resize(msg_i / sizeof(idx_t));

		return &(v->get(i).get(0));
	}

	/*! \brief It updates the full decomposition
	 *
	 *
	 */
	void postDecomposition()
	{
		//! Get the processor id
		size_t p_id = v_cl.getProcessUnitID();

		//! Get the number of processing units
		size_t Np = v_cl.getProcessingUnits();

		// Number of local vertices
		size_t nl_vertex = vtxdist.get(p_id+1).id - vtxdist.get(p_id).id;

		//! Get the resulting partition for this processor
		idx_t * partition = parmetis_graph.getPartition();

		//! Prepare the vector of arrays to contain all the partitions
		partitions.get(p_id).resize(nl_vertex);
		std::copy(partition, partition + nl_vertex, &partitions.get(p_id).get(0));

		// Reset data structure to keep track of the new vertex distribution across processors (needed to update the main graph)
		for (size_t i = 0; i < Np; ++i)
		{
			v_per_proc.get(i).clear();
		}

		// Communicate the local distribution to the other processors
		// so that each of them can reconstruct the global graph
		openfpm::vector<size_t> prc;
		openfpm::vector<size_t> sz;
		openfpm::vector<void *> ptr;

		for (size_t i = 0; i < Np; i++)
		{
			if (i != v_cl.getProcessUnitID())
			{
				partitions.get(i).clear();
				prc.add(i);
				sz.add(nl_vertex * sizeof(idx_t));
				ptr.add(partitions.get(p_id).getPointer());
			}
		}

		if (prc.size() == 0)
			v_cl.sendrecvMultipleMessagesNBX(0, NULL, NULL, NULL, message_receive, &partitions,NONE);
		else
			v_cl.sendrecvMultipleMessagesNBX(prc.size(), &sz.get(0), &prc.get(0), &ptr.get(0), message_receive, &partitions,NONE);

		// Update graphs with the received data
		updateGraphs();
	}

public:

	/*! Constructor of the ParMetis distribution
	 *
	 * \param v_cl Vcluster to use as communication object in this class
	 */
	ParMetisDistribution(Vcluster & v_cl)
	:is_distributed(false),v_cl(v_cl), parmetis_graph(v_cl, v_cl.getProcessingUnits()), vtxdist(v_cl.getProcessingUnits() + 1), partitions(v_cl.getProcessingUnits()), v_per_proc(v_cl.getProcessingUnits())
	{
	}

	/*! Copy constructor
	 *
	 * \param pm Distribution to copy
	 *
	 */
	ParMetisDistribution(const ParMetisDistribution<dim,T> & pm)
	:v_cl(pm.v_cl),parmetis_graph(v_cl, v_cl.getProcessingUnits())
	{
		this->operator=(pm);
	}

	/*! Move constructor
	 *
	 * \param pm Distribution to move
	 *
	 */
	ParMetisDistribution(ParMetisDistribution<dim,T> && pm)
	:v_cl(pm.v_cl)
	{
		this->operator=(pm);
	}

	/*! \brief Create the Cartesian graph
	 *
	 * \param grid grid information
	 * \param dom domain
	 */
	void createCartGraph(grid_sm<dim, void> & grid, Box<dim, T> dom)
	{
		size_t bc[dim];

		for (size_t i = 0 ; i < dim ; i++)
			bc[i] = NON_PERIODIC;

		// Set grid and domain
		gr = grid;
		domain = dom;

		// Create a cartesian grid graph
		CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;
		gp = g_factory_part.template construct<NO_EDGE, nm_v::id, T, dim - 1, 0>(gr.getSize(), domain, bc);
		initLocalToGlobalMap();

		//! Get the number of processing units
		size_t Np = v_cl.getProcessingUnits();

		//! Division of the vertices into Np graphs
		//! Put (div_v + 1) vertices in the first mod_v graphs
		//! Put div_v vertices in the remaining graphs
		//! (e.g. 10 vertices on 3 processors gives vtxdist = 0,4,7,10)
		size_t mod_v = gr.size() % Np;
		size_t div_v = gr.size() / Np;

		for (size_t i = 0; i <= Np; i++)
		{
			if (i < mod_v)
				vtxdist.get(i).id = (div_v + 1) * i;
			else
				vtxdist.get(i).id = (div_v) * i + mod_v;
		}

		// Init to 0.0 axis z (to fix in graphFactory)
		if (dim < 3)
		{
			for (size_t i = 0; i < gp.getNVertex(); i++)
			{
				gp.vertex(i).template get<nm_v::x>()[2] = 0.0;
			}
		}
		for (size_t i = 0; i < gp.getNVertex(); i++)
		{
			gp.vertex(i).template get<nm_v::global_id>() = i;
		}
	}

	/*! \brief Get the current graph (main)
	 *
	 */
	Graph_CSR<nm_v, nm_e> & getGraph()
	{
		return gp;
	}
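
	/* Usage note (a minimal sketch, not part of the class interface): the graph returned
	 * by getGraph() can be inspected to iterate all sub-sub-domains, for example to give
	 * every vertex a uniform computational cost before calling decompose(). Here "dist"
	 * is a hypothetical ParMetisDistribution object:
	 *
	 *   for (size_t i = 0 ; i < dist.getGraph().getNVertex() ; i++)
	 *       dist.setComputationCost(i, 1);
	 */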

	/*! \brief Create the decomposition
	 *
	 */
	void decompose()
	{
		if (is_distributed == false)
			parmetis_graph.initSubGraph(gp, vtxdist, m2g, verticesGotWeights);
		else
			parmetis_graph.reset(gp, vtxdist, m2g, verticesGotWeights);

		//! Decompose
		parmetis_graph.decompose(vtxdist);

		// update after decomposition
		postDecomposition();

		is_distributed = true;
	}

	/*! \brief Refine current decomposition
	 *
	 * It makes a refinement of the current decomposition using the Parmetis function RefineKWay
	 * After that it also does the remapping of the graph
	 *
	 */
	void refine()
	{
		// Reset parmetis graph and reconstruct it
		parmetis_graph.reset(gp, vtxdist, m2g, verticesGotWeights);

		// Refine
		parmetis_graph.refine(vtxdist);

		postDecomposition();
	}

	/*! \brief Redecompose current decomposition
	 *
	 * It makes a redecomposition using Parmetis, taking into consideration
	 * also the migration cost
	 *
	 */
	void redecompose()
	{
		// Reset parmetis graph and reconstruct it
		parmetis_graph.reset(gp, vtxdist, m2g, verticesGotWeights);

		// Redecompose
		parmetis_graph.redecompose(vtxdist);

		postDecomposition();
	}

	/*! \brief Compute the unbalance of this processor compared to the optimal balance
	 *
	 * \return the unbalance with respect to the optimal one, expressed as a percentage (1.0 means 1%)
	 */
	float getUnbalance()
	{
		long t_cost = 0;

		long min, max, sum;
		float unbalance;

		t_cost = getProcessorLoad();

		min = t_cost;
		max = t_cost;
		sum = t_cost;

		v_cl.min(min);
		v_cl.max(max);
		v_cl.sum(sum);
		v_cl.execute();

		unbalance = ((float) (max - min)) / (float) (sum / v_cl.getProcessingUnits());
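		// Example with hypothetical numbers: three processors with loads {90, 100, 110}
		// give max - min = 20 and an average load of 100, so the function returns 20 (20%)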

		return unbalance * 100;
	}

	/*! \brief Function that returns the position of the vertex in space
	 *
	 * \param id vertex id
	 * \param pos vector that will contain x, y, z
	 *
	 */
	void getSubSubDomainPosition(size_t id, T (&pos)[dim])
	{
#ifdef SE_CLASS1
		if (id >= gp.getNVertex())
			std::cerr << __FILE__ << ":" << __LINE__ << " Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif

		// Copy the geometrical information inside the pos vector
		pos[0] = gp.vertex(id).template get<nm_v::x>()[0];
		pos[1] = gp.vertex(id).template get<nm_v::x>()[1];
		if (dim == 3)
			pos[2] = gp.vertex(id).template get<nm_v::x>()[2];
	}

	/*! \brief Function that sets the weight of the vertex
	 *
	 * \param id vertex id
	 * \param weight weight to give to the vertex
	 *
	 */
	inline void setComputationCost(size_t id, size_t weight)
	{
		if (!verticesGotWeights)
			verticesGotWeights = true;

#ifdef SE_CLASS1
		if (id >= gp.getNVertex())
			std::cerr << __FILE__ << ":" << __LINE__ << " Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif

		// Update vertex in main graph
		gp.vertex(id).template get<nm_v::computation>() = weight;
	}

	/*! \brief Checks if weights are used on the vertices
	 *
	 * \return true if weights are used in the decomposition
	 */
	bool weightsAreUsed()
	{
		return verticesGotWeights;
	}

	/*! \brief Function that gets the weight of the vertex (its computation cost)
	 *
	 * \param id vertex id
	 *
	 * \return the computation cost of the vertex
	 *
	 */
	size_t getSubSubDomainComputationCost(size_t id)
	{
#ifdef SE_CLASS1
		if (id >= gp.getNVertex())
			std::cerr << __FILE__ << ":" << __LINE__ << " Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif

		return gp.vertex(id).template get<nm_v::computation>();
	}

	/*! \brief Compute the processor load counting the total weights of its vertices
	 *
	 * \return the computational load of the processor graph
	 */
	size_t getProcessorLoad()
	{
		size_t load = 0;

		// Processor id
		size_t p_id = v_cl.getProcessUnitID();

		for (rid i = vtxdist.get(p_id); i < vtxdist.get(p_id+1) ; ++i)
			load += gp.vertex(m2g.find(i)->second.id).template get<nm_v::computation>();

		return load;
	}

	/*! \brief Set the migration cost of the vertex id
	 *
	 * \param id id of the vertex to update
	 * \param migration cost of the migration
	 */
	void setMigrationCost(size_t id, size_t migration)
	{
#ifdef SE_CLASS1
		if (id >= gp.getNVertex())
			std::cerr << __FILE__ << ":" << __LINE__ << " Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif

		gp.vertex(id).template get<nm_v::migration>() = migration;
	}

	/*! \brief Set communication cost of the edge id
	 *
	 * \param v_id Id of the source vertex of the edge
	 * \param e i-th child (edge) of the vertex
	 * \param communication Communication value
	 */
	void setCommunicationCost(size_t v_id, size_t e, size_t communication)
	{
#ifdef SE_CLASS1

		size_t e_id = v_id + e;

		if (e_id >= gp.getNEdge())
			std::cerr << "Such edge doesn't exist (id = " << e_id << ", " << "total size = " << gp.getNEdge() << ")\n";
#endif

		gp.getChildEdge(v_id, e).template get<nm_e::communication>() = communication;
	}

	/*! \brief Returns total number of sub-sub-domains in the distribution graph
	 *
	 * \return the total number of sub-sub-domains
	 *
	 */
	size_t getNSubSubDomains() const
	{
		return gp.getNVertex();
	}

	/*! \brief Return the total number of sub-sub-domains this processor owns
	 *
	 * \return the total number of sub-sub-domains owned by this processor
	 *
	 */
	size_t getNOwnerSubSubDomains() const
	{
		return sub_sub_owner.size();
	}

	/*! \brief Return the global id of the owned sub-sub-domain
	 *
	 * \param id index in the list of owned sub-sub-domains
	 *
	 * \return the global id
	 *
	 */
	size_t getOwnerSubSubDomain(size_t id) const
	{
		return sub_sub_owner.get(id);
	}
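
	/* Example (a minimal sketch): iterate the sub-sub-domains owned by this processor,
	 * where "dist" is a hypothetical ParMetisDistribution object:
	 *
	 *   for (size_t i = 0 ; i < dist.getNOwnerSubSubDomains() ; i++)
	 *   {
	 *       size_t sub_sub_gid = dist.getOwnerSubSubDomain(i);
	 *       // ... use sub_sub_gid ...
	 *   }
	 */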

	/*! \brief Returns the total number of neighbors of the sub-sub-domain id
	 *
	 * \param id id of the sub-sub-domain
	 *
	 * \return the number of neighborhood sub-sub-domains of the given sub-sub-domain
	 *
	 */
	size_t getNSubSubDomainNeighbors(size_t id)
	{
#ifdef SE_CLASS1
		if (id >= gp.getNVertex())
			std::cerr << __FILE__ << ":" << __LINE__ << " Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif

		return gp.getNChilds(id);
	}

	/*! \brief Print the current distribution and save it to a VTK file
	 *
	 * \param file filename
	 *
	 */
	void write(const std::string & file)
	{
		VTKWriter<Graph_CSR<nm_v, nm_e>, VTK_GRAPH> gv2(gp);
		gv2.write(std::to_string(v_cl.getProcessUnitID()) + "_" + file + ".vtk");
	}

	/*! \brief Copy assignment
	 *
	 * \param dist distribution to copy
	 *
	 * \return itself
	 *
	 */
	const ParMetisDistribution<dim,T> & operator=(const ParMetisDistribution<dim,T> & dist)
	{
		is_distributed = dist.is_distributed;
		gr = dist.gr;
		domain = dist.domain;
		gp = dist.gp;
		vtxdist = dist.vtxdist;
		partitions = dist.partitions;
		v_per_proc = dist.v_per_proc;
		verticesGotWeights = dist.verticesGotWeights;
		sub_sub_owner = dist.sub_sub_owner;
		m2g = dist.m2g;
		parmetis_graph = dist.parmetis_graph;

		return *this;
	}

	/*! \brief Move assignment
	 *
	 * \param dist distribution to move in
	 *
	 * \return itself
	 *
	 */
	const ParMetisDistribution<dim,T> & operator=(ParMetisDistribution<dim,T> && dist)
	{
		is_distributed = dist.is_distributed;
		v_cl = dist.v_cl;
		gr = dist.gr;
		domain = dist.domain;
		gp.swap(dist.gp);
		vtxdist.swap(dist.vtxdist);
		partitions.swap(dist.partitions);
		v_per_proc.swap(dist.v_per_proc);
		verticesGotWeights = dist.verticesGotWeights;
		sub_sub_owner.swap(dist.sub_sub_owner);
		m2g.swap(dist.m2g);
		parmetis_graph = dist.parmetis_graph;

		return *this;
	}

	/*! \brief Get the decomposition counter
	 *
	 * \return the decomposition counter
	 *
	 */
	size_t get_ndec()
	{
		return parmetis_graph.get_ndec();
	}

	/*! \brief Set the tolerance for each partition
	 *
	 * \param tol tolerance
	 *
	 */
	void setDistTol(double tol)
	{
		parmetis_graph.setDistTol(tol);
	}
};
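
/* Example usage (a minimal sketch, distinct from the unit tests referenced in the class
 * documentation; it assumes an already initialized Vcluster, e.g. obtained with
 * create_vcluster() after openfpm_init(), and an 8x8x8 grid of sub-sub-domains):
 *
 *   Vcluster & v_cl = create_vcluster();
 *
 *   ParMetisDistribution<3, float> dist(v_cl);
 *
 *   size_t sz[3] = {8, 8, 8};
 *   grid_sm<3, void> info(sz);
 *   Box<3, float> box({0.0, 0.0, 0.0}, {1.0, 1.0, 1.0});
 *   dist.createCartGraph(info, box);
 *
 *   // optionally set computation / migration / communication costs here
 *
 *   dist.decompose();   // first decomposition
 *   dist.refine();      // later, refine it after the costs have changed
 */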

#endif /* SRC_DECOMPOSITION_PARMETISDISTRIBUTION_HPP_ */