From b26f57ebd866c057f6c6758f7de0187618ba5db9 Mon Sep 17 00:00:00 2001
From: Pietro Incardona <incardon@mpi-cbg.de>
Date: Tue, 27 Jan 2015 20:02:29 +0100
Subject: [PATCH] Added missing files

---
 Makefile.am                     |   3 +
 configure.ac                    | 204 ++++++++++++++
 src/dec_optimizer.hpp           | 475 ++++++++++++++++++++++++++++++++
 src/dec_optimizer_unit_test.hpp |  71 +++++
 src/gargabe.hpp                 | 105 +++++++
 src/hypercube_unit_test.hpp     | 384 ++++++++++++++++++++++++++
 src/main.cpp                    |  21 ++
 src/metis_util.hpp              | 306 ++++++++++++++++++++
 src/metis_util_unit_test.hpp    | 186 +++++++++++++
 9 files changed, 1755 insertions(+)
 create mode 100755 Makefile.am
 create mode 100755 configure.ac
 create mode 100644 src/dec_optimizer.hpp
 create mode 100644 src/dec_optimizer_unit_test.hpp
 create mode 100644 src/gargabe.hpp
 create mode 100644 src/hypercube_unit_test.hpp
 create mode 100755 src/main.cpp
 create mode 100644 src/metis_util.hpp
 create mode 100644 src/metis_util_unit_test.hpp

diff --git a/Makefile.am b/Makefile.am
new file mode 100755
index 00000000..7c618d3e
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+bin_PROGRAMS = 
\ No newline at end of file
diff --git a/configure.ac b/configure.ac
new file mode 100755
index 00000000..fb719e48
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,204 @@
+#                                               -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+
+AC_PREREQ(2.59)
+AC_INIT(FULL-PACKAGE-NAME, VERSION, BUG-REPORT-ADDRESS)
+AC_CANONICAL_SYSTEM
+AC_CONFIG_SRCDIR([src/main.cpp])
+AM_INIT_AUTOMAKE
+AC_CONFIG_HEADER([src/config.h])
+m4_ifdef([MYSQL_FOUND],,[m4_include([m4/ax_lib_mysql.m4])])
+m4_ifdef([AX_CHECK_COMPILER_FLAGS],,[m4_include([m4/ax_check_compiler_flags.m4])])
+m4_ifdef([ACX_PTHREAD],,[m4_include([m4/acx_pthread.m4])])
+m4_ifdef([AX_CHECK_CL],,[m4_include([m4/ax_opencl.m4])])
+m4_ifdef([AX_BOOST_BASE],,[m4_include([m4/ax_boost_base.m4])])
+m4_ifdef([AX_BOOST_PROGRAM_OPTIONS],,[m4_include([m4/ax_boost_program_options.m4])])
+m4_ifdef([AX_BOOST_THREAD],,[m4_include([m4/ax_boost_thread.m4])])
+m4_ifdef([ACX_MPI],,[m4_include([m4/acx_mpi.m4])])
+m4_ifdef([AX_OPENMP],,[m4_include([m4/ax_openmp.m4])])
+m4_ifdef([AX_GCC_X86_CPUID],,[m4_include([m4/ax_gcc_x86_cpuid.m4])])
+m4_ifdef([AX_GCC_ARCHFLAG],,[m4_include([m4/ax_gcc_archflag.m4])])
+m4_ifdef([AX_CUDA],,[m4_include([m4/ax_cuda.m4])])
+
+CXXFLAGS+=" --std=c++11 "
+NVCCFLAGS=" "
+INCLUDES_PATH=" "
+
+# Checks for programs.
+AC_PROG_CXX
+
+# Checks g++ flags
+
+AC_CANONICAL_HOST
+
+# Check target architecture
+
+AX_GCC_ARCHFLAG([], [CXXFLAGS="$CXXFLAGS $ax_cv_gcc_archflag"], [])
+
+###### Check for debug compilation
+
+AC_MSG_CHECKING(whether to build with debug information)
+debuger=no
+AC_ARG_ENABLE(debug,
+	AC_HELP_STRING(
+	    [--enable-debug],
+	    [enable debug data generation (def=no)]
+	),
+  	debuger="$enableval"
+)
+
+
+
+AC_MSG_RESULT($debuger)
+if test x"$debuger" = x"yes"; then
+	AC_DEFINE([DEBUG_MODE],[],[Debug])
+	AC_DEFINE([DEBUG],[],[Debug])
+	CXXFLAGS="$CXXFLAGS -g3  -Wall -O0 "
+	NVCCFLAGS="$NVCCFLAGS -g -O0 "
+else
+	CXXFLAGS="$CXXFLAGS -Wall -O3 -g3 "
+	NVCCFLAGS="$NVCCFLAGS -O3 "
+fi
+
+## Check for memcheck
+
+AC_MSG_CHECKING(whether to build with memcheck capabilities)
+debuger=no
+AC_ARG_ENABLE(memcheck,
+        AC_HELP_STRING(
+            [--enable-memcheck],
+            [enable memory check (def=no)]
+        ),
+        memcheck="$enableval"
+)
+
+AC_MSG_RESULT($memcheck)
+
+if test x"$memcheck" = x"yes"; then
+        AC_DEFINE([MEMLEAK_CHECK],[],[Memory check, corruption and leak])
+fi
+
+#########
+
+####### include OpenFPM_devices include path
+
+INCLUDES_PATH+="-I. -I../../metis_install/include -I../../OpenFPM_IO/src -I../../OpenFPM_data/src -I../../OpenFPM_devices/src -I../../OpenFPM_vcluster/src/"
+
+####### Checking for GPU support
+
+AX_CUDA
+
+AC_MSG_CHECKING(whether to build with GPU support)
+gpu_support=no
+AC_ARG_ENABLE(gpu,
+	AC_HELP_STRING(
+	    [--enable-gpu],
+	    [enable gpu support]
+	),
+  	gpu_support="$enableval"
+)
+
+
+
+AC_MSG_RESULT($gpu_support)
+if test x"$gpu_support" = x"yes"; then
+	AC_DEFINE([GPU],[],[GPU support])
+fi
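+
+# Illustrative configure invocations (based only on the options defined above):
+#
+#   ./configure --enable-debug      # defines DEBUG/DEBUG_MODE, compiles with -g3 -O0
+#   ./configure --enable-memcheck   # defines MEMLEAK_CHECK
+#   ./configure --enable-gpu        # defines GPU
+#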
+
+
+# Set this conditional if cuda is wanted
+
+AM_CONDITIONAL(BUILDCUDA, test ! x$NVCC = x"no")
+
+###########################
+
+no_avx=no
+no_sse42=no
+no_sse41=no
+no_sse3=no
+no_sse2=no
+no_sse=no
+no_mmx=no
+AX_CHECK_COMPILER_FLAGS([-msse4.2],[CXXFLAGS="$CXXFLAGS -mavx"],[no_avx=yes])
+AX_CHECK_COMPILER_FLAGS([-msse4.2],[CXXFLAGS="$CXXFLAGS -msse4.2"],[no_sse42=yes])
+AX_CHECK_COMPILER_FLAGS([-msse4.1],[CXXFLAGS="$CXXFLAGS -msse4.1"],[no_sse41=yes])
+AX_CHECK_COMPILER_FLAGS([-msse3],[CXXFLAGS="$CXXFLAGS -msse3"],[no_sse3=yes])
+AX_CHECK_COMPILER_FLAGS([-msse2],[CXXFLAGS="$CXXFLAGS -msse2"],[no_sse2=yes])
+AX_CHECK_COMPILER_FLAGS([-msse],[CXXFLAGS="$CXXFLAGS -msse"],[no_sse=yes])
+AX_CHECK_COMPILER_FLAGS([-mmmx],[CXXFLAGS="$CXXFLAGS -mmmx"],[no_mmx=yes])
+AX_CHECK_COMPILER_FLAGS([-Wno-unused-but-set-variable],[CXXFLAGS="$CXXFLAGS -Wno-unused-but-set-variable"],[])
+
+AC_SUBST(NVCCFLAGS)
+AC_SUBST(INCLUDES_PATH)
+
+# Checks for typedefs, structures, and compiler characteristics.
+
+# Checks for library functions.
+
+AC_CONFIG_FILES([Makefile src/Makefile])
+AC_OUTPUT
+echo ""
+echo   "***********************************"
+echo   "*                                 *"
+arch_str="${ax_cv_gcc_archflag#-march=}"
+arch_str="${arch_str#-mtune=}"
+n_arch_str=${#arch_str}
+for (( X=0; X<23-n_arch_str; X++ ))
+do
+   arch_str="$arch_str "
+done
+echo "*    arch: $arch_str*"
+if [ test x"$no_sse42" = x"no" ]; then
+    echo "*    sse4.2: yes                  *"
+else
+    echo "*    sse4.2: no                   *"
+fi
+if [ test x"$no_sse41" = x"no" ]; then
+    echo "*    sse4.1: yes                  *"
+else
+    echo "*    sse4.1: no                   *"
+fi
+if [ test x"$no_sse3" = x"no" ]; then
+    echo "*    sse3: yes                    *"
+else
+    echo "*    sse3: no                     *"
+fi
+if [ test x"$no_sse2" = x"no" ]; then
+    echo "*    sse2: yes                    *"
+else
+    echo "*    sse2: no                     *"
+fi
+if [ test x"$no_sse" = x"no" ]; then
+    echo "*    sse: yes                     *"
+else
+    echo "*    sse: no                      *"
+fi
+if [ test x"$no_mmx" = x"no" ]; then
+  echo "*    mmx: yes                     *"
+else
+  echo "*    mmx: no                      *"
+fi
+if [ test x"$profiler" = x"yes" ]; then
+  echo "*    profiler: yes                *"
+else
+  echo "*    profiler: no                 *"
+fi
+if [ test x"$debuger" = x"yes" ]; then
+  echo "*    debug: yes                   *"
+else
+  echo "*    debug: no                    *"
+fi
+if [ test x"$no_64" = x"no" ]; then
+  echo "*    64 bit: yes                  *"
+else
+  echo "*    64 bit: no                   *"
+fi
+if [ test x"$gpu_support" = x"no" ]; then
+  echo "*    gpu: no                      *"
+else
+  echo "*    gpu: yes                     *"
+fi
+echo   "*                                 *"
+echo   "***********************************"
+
diff --git a/src/dec_optimizer.hpp b/src/dec_optimizer.hpp
new file mode 100644
index 00000000..3eeba645
--- /dev/null
+++ b/src/dec_optimizer.hpp
@@ -0,0 +1,475 @@
+#ifndef DEC_OPTIMIZER_HPP
+#define DEC_OPTIMIZER_HPP
+
+/*! \brief This class represents a wavefront of dimension dim-1
+ *
+ * \tparam dim Dimensionality of the space in which the wavefront lives
+ *             (the wavefront itself has dimensionality dim-1)
+ *
+ */
+
+template <unsigned int dim>
+class wavefront
+{
+public:
+
+	typedef boost::fusion::vector<size_t[dim],size_t[dim]> type;
+
+	typedef typename memory_traits_inte<type>::type memory_int;
+	typedef typename memory_traits_lin<type>::type memory_lin;
+
+	type data;
+
+	static const int start = 0;
+	static const int stop = 1;
+	static const int max_prop = 2;
+
+	/* \brief Get the key to the point 1
+	 *
+	 * \return the key to the point 1
+	 *
+	 */
+
+	grid_key_dx<dim> getKP1()
+	{
+		// grid key to return
+		grid_key_dx<dim> ret(boost::fusion::at_c<start>(data));
+
+		return ret;
+	}
+
+	/* \brief Get the key to point 2
+	 *
+	 * \return the key to the point 2
+	 *
+	 */
+
+	grid_key_dx<dim> getKP2()
+	{
+		// grid key to return
+		grid_key_dx<dim> ret(boost::fusion::at_c<stop>(data));
+
+		return ret;
+	}
+
+	/* \brief Produce a box from an encapsulated wavefront object
+	 *
+	 * \param enc encapsulated object
+	 *
+	 * \return the box spanned by the wavefront
+	 *
+	 */
+
+	template<typename encap> static Box<dim,size_t> getBox(encap & enc)
+	{
+		Box<dim,size_t> bx;
+
+		// Create the object from the encapsulation
+
+		for (int i = 0 ; i < dim ; i++)
+		{
+			// start is the lower corner, stop the upper corner
+			bx.setLow(i,enc.template get<wavefront::start>()[i]);
+			bx.setHigh(i,enc.template get<wavefront::stop>()[i]);
+		}
+
+		return bx;
+	}
+
+	/* \brief Produce a box from an encapsulated wavefront object
+	 *
+	 * \param enc encapsulated object
+	 * \param bx box to fill
+	 *
+	 */
+
+	template<typename encap> static void getBox(encap & enc, Box<dim,size_t> & bx)
+	{
+		// Create the object from the encapsulation
+
+		for (int i = 0 ; i < dim ; i++)
+		{
+			// start is the lower corner, stop the upper corner
+			bx.setLow(i,enc.template get<wavefront::start>()[i]);
+			bx.setHigh(i,enc.template get<wavefront::stop>()[i]);
+		}
+	}
+};
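+
+/* Illustrative usage sketch (not additional API, just how the class is used
+ * by dec_optimizer below): an openfpm::vector of wavefronts is created and a
+ * Box is extracted from one of its encapsulated elements.
+ *
+ * \code
+ * openfpm::vector<wavefront<3>> v_w(6);
+ * Box<3,size_t> bx = wavefront<3>::getBox(v_w.get(0));
+ * \endcode
+ */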
+
+/*! \brief This class takes a graph representing the space decomposition and produces a
+ *         simplified version
+ *
+ * Given a Graph_CSR and a seed point it produces an alternative decomposition that reduces
+ * the ghost over-stress
+ *
+ */
+
+template <unsigned int dim, typename Graph>
+class dec_optimizer
+{
+	// grid used as a helper for linearization and sub-grid iteration
+
+	grid<dim,void> gh;
+
+private:
+
+	/* \brief Write the wavefront positions into the graph
+	 *
+	 * \tparam prp property to set
+	 *
+	 * \param graph graph we are processing
+	 * \param v_w list of wavefronts to write
+	 *
+	 */
+
+	template<unsigned int prp> void write_wavefront(Graph & graph,openfpm::vector<wavefront<dim>> & v_w)
+	{
+		// fill the whole domain with 0
+
+		fill_domain<prp>(graph,gh.getBox(),0);
+
+		// fill the wavefront
+
+		for (int i = 0 ; i < v_w.size() ; i++)
+		{
+			Box<dim,size_t> box = wavefront<dim>::getBox(v_w.get(i));
+
+			fill_domain<prp>(graph,box,1);
+		}
+	}
+
+	/* \brief Fill the domain
+	 *
+	 * \tparam p_sub property to set with the sub-domain id
+	 *
+	 * \param graph we are processing
+	 * \param box Box to fill
+	 * \param ids value to fill with
+	 *
+	 */
+
+	template<unsigned int p_sub> void fill_domain(Graph & graph, Box<dim,size_t> & box, size_t ids)
+	{
+		// Create a subgrid iterator
+		grid_key_dx_iterator_sub<dim> g_sub(gh,box.getKP1(),box.getKP2());
+
+		// iterate through all grid points
+
+		while (g_sub.isNext())
+		{
+			// get the actual key
+			const grid_key_dx<dim> & gk = g_sub.get();
+
+			// get the vertex and set the sub id
+
+			graph.vertex(gh.LinId(gk)).template get<p_sub>() = ids;
+
+			// move to the next grid point
+			++g_sub;
+		}
+	}
+
+	/* \brief Add the boundary domains to the queue
+	 *
+	 * \tparam p_sub property where the sub-decomposition is stored
+	 *
+	 * \param domains vector with domains to process
+	 * \param v_w list of wavefronts
+	 * \param graph graph we are processing
+	 * \param w_comb hyper-cube combinations
+	 *
+	 */
+	template<unsigned int p_sub> void add_to_queue(openfpm::vector<size_t> & domains, openfpm::vector<wavefront<dim>> & v_w, Graph & graph,  std::vector<comb<dim>> & w_comb)
+	{
+		// create a new queue
+		openfpm::vector<size_t> domains_new;
+
+		// push into the new queue the domains of the old queue that have not been assigned yet
+
+		for (size_t j = 0 ; j < domains.size() ; j++)
+		{
+			if (graph.vertex(domains.get(j)).template get<p_sub>() < 0)
+			{
+				// not assigned push it
+
+				domains_new.add(domains.get(j));
+			}
+		}
+
+		// take each wavefront and expand it by one step along its direction d
+
+		for (int d = 0 ; d < v_w.size() ; d++)
+		{
+			// expand the wavefront
+			for (int j = 0 ; j < dim ; j++)
+			{
+				v_w.template get<wavefront<dim>::start>(d)[j] = v_w.template get<wavefront<dim>::start>(d)[j] + w_comb[d].c[j];
+				v_w.template get<wavefront<dim>::stop>(d)[j] = v_w.template get<wavefront<dim>::stop>(d)[j] + w_comb[d].c[j];
+			}
+		}
+
+		// for each expanded wavefront create a sub-grid iterator and add the sub-domains
+
+		for (int d = 0 ; d < v_w.size() ; d++)
+		{
+			// Create a sub-grid iterator
+			grid_key_dx_iterator_sub<dim> g_sub(gh,v_w.template get<wavefront<dim>::start>(d),v_w.template get<wavefront<dim>::stop>(d));
+
+			// iterate through all grid points
+
+			while (g_sub.isNext())
+			{
+				// get the actual key
+				const grid_key_dx<dim> & gk = g_sub.get();
+
+				// get the vertex and, if it is not assigned, add it to the queue
+
+				if (graph.vertex(gh.LinId(gk)).template get<p_sub>() < 0)
+				{
+					// add
+
+					domains_new.add(gh.LinId(gk));
+				}
+
+				// move to the next grid point
+				++g_sub;
+			}
+		}
+
+		// copy the new queue into the old one
+		domains = domains_new;
+	}
+
+	/* \brief Find the biggest hyper-cube
+	 *
+	 * Starting from one initial sub-domain, find the biggest hyper-cube
+	 *
+	 * \tparam p_sub id of the property storing the sub-decomposition
+	 * \tparam p_id id of the property containing the decomposition
+	 *
+	 * \param start_p initial domain
+	 * \param graph graph representing the grid
+	 * \param box output box containing the expanded hyper-cube
+	 * \param v_w list of wavefronts
+	 * \param w_comb hyper-cube combinations
+	 *
+	 */
+	template <unsigned int p_sub, unsigned int p_id> void expand_from_point(size_t start_p, Graph & graph, Box<dim,size_t> & box, openfpm::vector<wavefront<dim>> & v_w , std::vector<comb<dim>> & w_comb)
+	{
+		// We assume that Graph is the representation of a cartesian graph,
+		// this means that the direction d is at the child d
+
+		// Get the number of wavefronts
+		size_t n_wf = w_comb.size();
+
+		// Create an Hyper-cube
+		HyperCube<dim> hyp;
+
+		// direction of expansion
+
+		size_t domain_id = graph.vertex(gh.LinId(start_p)).template get<p_id>();
+		bool can_expand = true;
+
+		// while it is possible to expand
+
+		while (can_expand)
+		{
+			// reset can expand
+			can_expand = false;
+
+			// for each direction of expansion expand the wavefront
+
+			for (int d = 0 ; d < n_wf ; d++)
+			{
+				// number of processed sub-domain
+				size_t n_proc_sub = 0;
+
+				// flag to indicate if the wavefront can expand
+				bool w_can_expand = true;
+
+				// Create an iterator of the wavefront
+				grid_key_dx_iterator_sub<dim> it(gh,v_w.template get<wavefront<dim>::start>(d),v_w.template get<wavefront<dim>::stop>(d));
+
+				// for each subdomain
+				while (it.isNext())
+				{
+					// get the wavefront sub-domain id
+					size_t sub_w = gh.LinId(it.get());
+
+					// we get the processor id of the neighborhood sub-domain on direction d
+					size_t exp_p = graph.vertex(graph.getChild(sub_w,d)).template get<p_id>();
+
+					// we check if it is the same processor id
+					w_can_expand &= exp_p != domain_id;
+
+					// next domain
+					++it;
+
+					// increase the number of processed sub-domain
+					n_proc_sub++;
+				}
+
+				// if we did not process any sub-domain, we cannot expand
+				w_can_expand &= (n_proc_sub != 0);
+
+				// if at least one wavefront can expand, we have not reached the end
+				can_expand |= w_can_expand;
+
+				// if we can expand the wavefront expand it
+				if (w_can_expand == true)
+				{
+					// expand the wavefront
+					for (int j = 0 ; j < dim ; j++)
+					{
+						v_w.template get<wavefront<dim>::stop>(d)[j] = v_w.template get<wavefront<dim>::stop>(d)[j] + w_comb[d].c[j];
+						v_w.template get<wavefront<dim>::start>(d)[j] = v_w.template get<wavefront<dim>::start>(d)[j] + w_comb[d].c[j];
+
+						std::cout << "stop: " << v_w.template get<wavefront<dim>::stop>(d)[j] << "\n";
+						std::cout << "start: " << v_w.template get<wavefront<dim>::start>(d)[j] << "\n";
+					}
+
+					// expand the intersection of the wavefronts
+
+					std::vector<comb<dim>> q_comb = SubHyperCube<dim,dim-1>::getCombinations_R(w_comb[d],dim-2);
+
+					// Eliminate the w_comb[d] direction
+
+					for (int k = 0 ; k < q_comb.size() ; k++)
+					{
+						for (int j = 0 ; j < dim ; j++)
+						{
+							if (w_comb[d].c[j] != 0)
+							{
+								q_comb[k].c[j] = 0;
+							}
+						}
+					}
+
+					// for all the combinations
+					for (int j = 0 ; j < q_comb.size() ; j++)
+					{
+						size_t id = hyp.LinId(q_comb[j]);
+
+						// get the combination of the direction d
+
+						bool is_pos = hyp.isPositive(d);
+
+						// is positive, modify the stop point or the starting point
+
+						for (int s = 0 ; s < dim ; s++)
+						{
+							if (is_pos == true)
+							{v_w.template get<wavefront<dim>::stop>(id)[j] = v_w.template get<wavefront<dim>::stop>(d)[j] + w_comb[id].c[j];}
+							else
+							{v_w.template get<wavefront<dim>::start>(id)[j] = v_w.template get<wavefront<dim>::start>(d)[j] + w_comb[id].c[j];}
+						}
+					}
+				}
+			}
+
+			// Debug: output the wavefront graph
+
+			// duplicate the graph
+
+			Graph g_debug = graph.duplicate();
+			write_wavefront<nm_part_v::id>(g_debug,v_w);
+
+			VTKWriter<Graph> vtk(g_debug);
+			vtk.template write<1>("vtk_debug.vtk");
+
+			////////////////////////////////////
+		}
+
+		// get back the hyper-cube produced
+
+		for (int i = 0 ; i < dim ; i++)
+		{
+			// get the index of the wavefront direction
+			int w = 0;
+
+			for (int j = 0 ; j < dim ; j++)
+			{
+				if (w_comb[i].c[j] == 1)
+				{
+					w = j;
+					break;
+				}
+			}
+
+			// set the box
+			box.setHigh(i,v_w.template get<wavefront<dim>::stop>(i)[w]);
+			box.setLow(i,v_w.template get<wavefront<dim>::start>(i)[w]);
+		}
+	}
+
+public:
+
+	/*! \brief Constructor
+	 *
+	 * \param g Graph to simplify
+	 * \param sz size of the grid on each dimension
+	 *
+	 */
+
+	dec_optimizer(Graph & g, std::vector<size_t> sz)
+	:gh(sz)
+	{
+		// The graph g is supposed to represent a cartesian grid
+		// No check is performed on g
+	}
+
+	/*! \brief optimize the graph
+	 *
+	 * Starting from a (hyper-cubic) domain, it creates wavefronts at the boundary and expands
+	 * them until the wavefronts cannot expand any more.
+	 * The domains inside the hyper-cube are assigned one sub-id. This procedure continues until
+	 * every domain of one id has a sub-id
+	 *
+	 * \tparam p_sub property to fill with the sub-decomposition
+	 * \tparam p_id property containing the decomposition
+	 *
+	 * \param start_p seed point
+	 * \param graph graph we are processing
+	 *
+	 */
+
+	template <unsigned int p_sub, unsigned int p_id> void optimize(grid_key_dx<dim> & start_p, Graph & graph)
+	{
+		// sub-domain id
+		size_t sub_id =  0;
+
+		// queue
+		openfpm::vector<size_t> v_q;
+
+		// Create an hyper-cube
+		HyperCube<dim> hyp;
+
+		// Get the wavefront combinations
+		std::vector<comb<dim>> w_comb = hyp.getCombinations_R(dim-1);
+
+		// wavefronts
+		openfpm::vector<wavefront<dim>> v_w(w_comb.size());
+
+		// Initialize the wavefronts from the domain start_p
+
+		for (int i = 0 ; i < v_w.size() ; i++)
+		{
+			for (int j = 0 ; j < dim ; j++)
+			{
+				v_w.template get<wavefront<dim>::start>(i)[j] = start_p.get(j);
+				v_w.template get<wavefront<dim>::stop>(i)[j] = start_p.get(j);
+			}
+		}
+
+		// push the first domain
+
+		v_q.add(gh.LinId(start_p));
+
+		while (v_q.size() != 0)
+		{
+			// Box
+			Box<dim,size_t> box;
+
+			// Create the biggest box containing the domain
+			expand_from_point<p_sub,p_id>(gh.LinId(start_p),graph,box,v_w,w_comb);
+
+			// fill the domain
+			fill_domain<p_sub>(graph,box,sub_id);
+
+			// create the queue
+			add_to_queue<p_sub>(v_q,v_w,graph,w_comb);
+
+			// increment the sub_id
+			sub_id++;
+		}
+	}
+};
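+
+/* Illustrative usage sketch (it mirrors dec_optimizer_unit_test.hpp in this
+ * patch, it is not additional API): first decompose a cartesian graph with
+ * Metis, then simplify the decomposition starting from a seed point.
+ *
+ * \code
+ * std::vector<size_t> sz = {4,4,1};
+ * // g is a Graph_CSR<nm_v,nm_e> produced by CartesianGraphFactory
+ * dec_optimizer<3,Graph_CSR<nm_v,nm_e>> d_o(g,sz);
+ * grid_key_dx<3> start_p(0,0,0);
+ * d_o.optimize<nm_v::sub_id,nm_v::id>(start_p,g);
+ * \endcode
+ */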
+
+#endif
diff --git a/src/dec_optimizer_unit_test.hpp b/src/dec_optimizer_unit_test.hpp
new file mode 100644
index 00000000..7ab5aa20
--- /dev/null
+++ b/src/dec_optimizer_unit_test.hpp
@@ -0,0 +1,71 @@
+/*
+ * dec_optimize.hpp
+ *
+ *  Created on: Jan 16, 2015
+ *      Author: i-bird
+ */
+
+#ifndef DEC_OPTIMIZE_HPP_
+#define DEC_OPTIMIZE_HPP_
+
+
+#include "Graph/CartesianGraphFactory.hpp"
+#include "map_graph.hpp"
+#include "metis_util.hpp"
+#include "dec_optimizer.hpp"
+
+#undef GS_SIZE
+#define GS_SIZE 8
+
+BOOST_AUTO_TEST_SUITE( dec_optimizer_test )
+
+BOOST_AUTO_TEST_CASE( dec_optimizer_test_use)
+{
+	CartesianGraphFactory<3,Graph_CSR<nm_v,nm_e>> g_factory;
+	CartesianGraphFactory<3,Graph_CSR<nm_part_v,nm_part_e>> g_factory_part;
+
+	// Cartesian grid
+	std::vector<size_t> sz;
+	sz.push_back(4);
+	sz.push_back(4);
+	sz.push_back(1);
+
+	// Box
+	Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});
+
+	// Graph to decompose
+
+	Graph_CSR<nm_v,nm_e> g = g_factory.construct<nm_e::communication,float,2,0,1,2>(sz,box);
+
+	// Processor graph
+
+	Graph_CSR<nm_part_v,nm_part_e> gp = g_factory_part.construct<NO_EDGE,float,2>(sz,box);
+
+	// Convert the graph to metis
+
+	Metis<Graph_CSR<nm_v,nm_e>> met(g,16);
+
+	// decompose
+
+	met.decompose<nm_part_v::id>(gp);
+	met.decompose<nm_v::id>();
+
+	// optimize
+
+	dec_optimizer<3,Graph_CSR<nm_v,nm_e>> d_o(g,sz);
+
+	grid_key_dx<3> keyZero(0,0,0);
+	d_o.optimize<nm_v::sub_id,nm_v::id>(keyZero,g);
+
+	// Write the VTK file
+
+	VTKWriter<Graph_CSR<nm_part_v,nm_part_e>> vtk(gp);
+	vtk.write("vtk_partition.vtk");
+	VTKWriter<Graph_CSR<nm_v,nm_e>> vtk2(g);
+	vtk2.write("vtk_partition2.vtk");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+
+#endif /* DEC_OPTIMIZE_HPP_ */
diff --git a/src/gargabe.hpp b/src/gargabe.hpp
new file mode 100644
index 00000000..4b610b42
--- /dev/null
+++ b/src/gargabe.hpp
@@ -0,0 +1,105 @@
+/*
+ * gargabe.hpp
+ *
+ *  Created on: Jan 13, 2015
+ *      Author: i-bird
+ */
+
+#ifndef GARGABE_HPP_
+#define GARGABE_HPP_
+
+
+
+	template <unsigned int j, unsigned int i, typename Graph> void optimize(size_t start_p, Graph & graph)
+	{
+		// We assume that Graph is the representation of a cartesian graph,
+		// this means that the direction d is at the child d
+
+		// Create an Hyper-cube
+
+		HyperCube<dim> hyp;
+
+		// Get the number of wavefronts
+
+		size_t n_wf = hyp.getNumberOfElements_R(0);
+
+		// Get the number of intersecting wavefront
+
+
+
+		// Get the number of sub-dimensional common wavefronts,
+		// basically a list of all the sub-domains common to two or more wavefronts
+
+		// Create n_wf wavefront queue
+
+		openfpm::vector<wavefront> v_w;
+		v_w.reserve(n_wf);
+
+		// direction of expansion
+
+		size_t domain_id = 0;
+		int exp_dir = 0;
+		bool can_expand = true;
+
+		// while is possible to expand
+
+		while (can_expand)
+		{
+			// for each direction of expansion expand the wavefront
+
+			for (int d = 0 ; d < n_wf ; d++)
+			{
+				// get the wavefront at direction d
+
+				openfpm::vector<size_t> & wf_d = v_w.get<wavefront::domains>(d);
+
+				// flag to indicate if the wavefront can expand
+
+				bool w_can_expand = true;
+
+				// for each subdomain
+
+				for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
+				{
+					// check if the adjacent domain in direction d exist
+					// and is of the same id
+
+					// get the starting subdomain
+					size_t sub_w = wf_d.get<0>(sub);
+
+					// we get the processor id of the neighborhood sub-domain on direction d
+					size_t exp_p = graph.getChild(sub_w,d).get<j>();
+
+					// we check if it is the same processor id
+					if (exp_p != domain_id)
+					{
+						w_can_expand = false;
+					}
+				}
+
+				// if we can expand the wavefront expand it
+				if (w_can_expand == true)
+				{
+					// for each subdomain
+					for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
+					{
+						// update the position of the wavefront
+						wf_d.get<0>(sub) = wf_d.get<0>(sub) + gh.stride(d);
+					}
+
+					// here we add sub-domains to all the other queues
+					// get the face of the hyper-cube
+
+					SubHyperCube<dim,dim-1> sub_hyp = hyp.getSubHyperCube(d);
+
+					std::vector<comb<dim>> q_comb = sub_hyp.getCombinations_R(dim-2);
+				}
+			}
+		}
+
+		// For each point in the Hyper-cube check if we can move the wave front
+
+
+	}
+
+#endif /* GARGABE_HPP_ */
diff --git a/src/hypercube_unit_test.hpp b/src/hypercube_unit_test.hpp
new file mode 100644
index 00000000..ca63a875
--- /dev/null
+++ b/src/hypercube_unit_test.hpp
@@ -0,0 +1,384 @@
+#ifndef HYPERCUBE_UNIT_TEST_HPP
+#define HYPERCUBE_UNIT_TEST_HPP
+
+/*! \brief Check if the linearization is correct
+ *
+ * \param cmb vector of combinations to check
+ *
+ */
+
+template<unsigned int dim> void check_lin(std::vector<comb<dim>> cmb)
+{
+	// Check the linearization
+	for (int i = 0 ; i < cmb.size() ; i++)
+	{
+		BOOST_REQUIRE_EQUAL(HyperCube<dim>::LinId(cmb[i]),i);
+	}
+}
+
+/*! \brief Check if the combinations are distinct
+ *
+ * \param combs combinations to check
+ *
+ */
+
+template<unsigned int dim> bool isDinstict(std::vector<comb<dim>> combs)
+{
+	// Check if the combinations are distinct
+
+	for (int i = 0 ; i < combs.size() ; i++)
+	{
+		for (int j = i+1 ; j < combs.size() ; j++)
+		{
+			if (combs[i] == combs[j])
+				return false;
+		}
+
+	}
+
+	return true;
+}
+
+/*! \brief isSubdecomposition checks if combs are elements that exist in c as sub-elements
+ *
+ * \param combs elements to check
+ * \param c element that must contain all the combs
+ *
+ */
+
+template<unsigned int dim> bool isSubdecomposition(std::vector<comb<dim>> combs, comb<dim> c)
+{
+	for (int i = 0 ; i < combs.size() ; i++)
+	{
+		if (c.isSub(combs[i]) == false)
+			return false;
+	}
+
+	return true;
+}
+
+template<unsigned int dim> bool isValid(std::vector<comb<dim>> combs)
+{
+	// Check if the combinations are valid
+
+	for (int i = 0 ; i < combs.size() ; i++)
+	{
+		if (combs[i].isValid() == false)
+			return false;
+	}
+
+	return true;
+}
+
+BOOST_AUTO_TEST_SUITE( Hyper_cube )
+
+BOOST_AUTO_TEST_CASE( Hyper_cube_use)
+{
+	// Test Hyper-Cube functionality
+
+	size_t ele[8];
+
+	// Point
+//	ele[0] = HyperCube<0>::getNumberOfElements_R(0);
+
+	// Get combination for each dimensions
+//	std::vector<comb<0>> v_c0 = HyperCube<0>::getCombinations_R(0);
+
+	// Check
+//	BOOST_REQUIRE_EQUAL(ele[0],1);
+
+	// Line
+	// Number of vertex
+	ele[0] = HyperCube<1>::getNumberOfElements_R(0);
+	// Number of edge
+	ele[1] = HyperCube<1>::getNumberOfElements_R(1);
+
+	// Get combination for each dimensions
+	std::vector<comb<1>> v_c1_0 = HyperCube<1>::getCombinations_R(0);
+
+	// Check
+	BOOST_REQUIRE_EQUAL(ele[0],2);
+	BOOST_REQUIRE_EQUAL(ele[1],1);
+
+	// Fill the expected vector
+
+	comb<1> c[] = {1,-1};
+
+	// Check the linearization
+	check_lin(v_c1_0);
+
+	boost_check_array(&c[0],&v_c1_0[0],2);
+
+	// Square
+	// Number of vertex
+	ele[0] = HyperCube<2>::getNumberOfElements_R(0);
+	// Number of edge
+	ele[1] = HyperCube<2>::getNumberOfElements_R(1);
+	// Number of faces
+	ele[2] = HyperCube<2>::getNumberOfElements_R(2);
+
+	// Get combination for each dimensions
+	std::vector<comb<2>> v_c2_0 = HyperCube<2>::getCombinations_R(0);
+	std::vector<comb<2>> v_c2_1 = HyperCube<2>::getCombinations_R(1);
+
+	// Check
+	BOOST_REQUIRE_EQUAL(ele[0],4);
+	BOOST_REQUIRE_EQUAL(ele[1],4);
+	BOOST_REQUIRE_EQUAL(ele[2],1);
+
+	// Check combination
+
+	comb<2> c2_0[] = {{1,1},{1,-1},{-1,1},{-1,-1}};
+	comb<2> c2_1[] = {{1,0},{-1,0},{0,1},{0,-1}};
+	check_lin(v_c2_0);
+	check_lin(v_c2_1);
+
+	boost_check_array(&c2_0[0],&v_c2_0[0],4);
+	boost_check_array(&c2_1[0],&v_c2_1[0],4);
+
+	// Cube
+	// Number of vertex
+	ele[0] = HyperCube<3>::getNumberOfElements_R(0);
+	// Number of edge
+	ele[1] = HyperCube<3>::getNumberOfElements_R(1);
+	// Number of faces
+	ele[2] = HyperCube<3>::getNumberOfElements_R(2);
+	// Number of Cubes
+	ele[3] = HyperCube<3>::getNumberOfElements_R(3);
+
+	// Get combination for each dimensions
+	std::vector<comb<3>> v_c3_0 = HyperCube<3>::getCombinations_R(0);
+	std::vector<comb<3>> v_c3_1 = HyperCube<3>::getCombinations_R(1);
+	std::vector<comb<3>> v_c3_2 = HyperCube<3>::getCombinations_R(2);
+
+	// Check
+	BOOST_REQUIRE_EQUAL(ele[0],8);
+	BOOST_REQUIRE_EQUAL(ele[1],12);
+	BOOST_REQUIRE_EQUAL(ele[2],6);
+	BOOST_REQUIRE_EQUAL(ele[3],1);
+
+	// Check combination
+
+	comb<3> c3_0[] = {{1,1,1},{1,1,-1},{1,-1,1},{1,-1,-1},{-1,1,1},{-1,1,-1},{-1,-1,1},{-1,-1,-1}};
+	comb<3> c3_1[] = {{1,1,0},{1,-1,0},{-1,1,0},{-1,-1,0},{1,0,1},{1,0,-1},{-1,0,1},{-1,0,-1},{0,1,1},{0,1,-1},{0,-1,1},{0,-1,-1}};
+	comb<3> c3_2[] = {{1,0,0},{-1,0,0},{0,1,0},{0,-1,0},{0,0,1},{0,0,-1}};
+	check_lin(v_c3_0);
+	check_lin(v_c3_1);
+	check_lin(v_c3_2);
+
+	boost_check_array(&c3_0[0],&v_c3_0[0],8);
+	boost_check_array(&c3_1[0],&v_c3_1[0],12);
+	boost_check_array(&c3_2[0],&v_c3_2[0],6);
+
+	// Tesseract
+	// Number of vertex
+	ele[0] = HyperCube<4>::getNumberOfElements_R(0);
+	// Number of edge
+	ele[1] = HyperCube<4>::getNumberOfElements_R(1);
+	// Number of faces
+	ele[2] = HyperCube<4>::getNumberOfElements_R(2);
+	// Number of Cubes
+	ele[3] = HyperCube<4>::getNumberOfElements_R(3);
+	// Number of Tesseract
+	ele[4] = HyperCube<4>::getNumberOfElements_R(4);
+
+	// Get combination for each dimensions
+	std::vector<comb<4>> v_c4_0 = HyperCube<4>::getCombinations_R(0);
+	std::vector<comb<4>> v_c4_1 = HyperCube<4>::getCombinations_R(1);
+	std::vector<comb<4>> v_c4_2 = HyperCube<4>::getCombinations_R(2);
+	std::vector<comb<4>> v_c4_3 = HyperCube<4>::getCombinations_R(3);
+	check_lin(v_c4_0);
+	check_lin(v_c4_1);
+	check_lin(v_c4_2);
+	check_lin(v_c4_3);
+
+	// Check
+	BOOST_REQUIRE_EQUAL(ele[0],16);
+	BOOST_REQUIRE_EQUAL(ele[1],32);
+	BOOST_REQUIRE_EQUAL(ele[2],24);
+	BOOST_REQUIRE_EQUAL(ele[3],8);
+	BOOST_REQUIRE_EQUAL(ele[4],1);
+
+	// Penteract
+	// Number of vertex
+	ele[0] = HyperCube<5>::getNumberOfElements_R(0);
+	// Number of edge
+	ele[1] = HyperCube<5>::getNumberOfElements_R(1);
+	// Number of faces
+	ele[2] = HyperCube<5>::getNumberOfElements_R(2);
+	// Number of Cubes
+	ele[3] = HyperCube<5>::getNumberOfElements_R(3);
+	// Number of Tesseract
+	ele[4] = HyperCube<5>::getNumberOfElements_R(4);
+	// Number of Penteract
+	ele[5] = HyperCube<5>::getNumberOfElements_R(5);
+
+	// Get combination for each dimensions
+	std::vector<comb<5>> v_c5_0 = HyperCube<5>::getCombinations_R(0);
+	std::vector<comb<5>> v_c5_1 = HyperCube<5>::getCombinations_R(1);
+	std::vector<comb<5>> v_c5_2 = HyperCube<5>::getCombinations_R(2);
+	std::vector<comb<5>> v_c5_3 = HyperCube<5>::getCombinations_R(3);
+	std::vector<comb<5>> v_c5_4 = HyperCube<5>::getCombinations_R(4);
+	check_lin(v_c5_0);
+	check_lin(v_c5_1);
+	check_lin(v_c5_2);
+	check_lin(v_c5_3);
+	check_lin(v_c5_4);
+
+	// Check
+	BOOST_REQUIRE_EQUAL(ele[0],32);
+	BOOST_REQUIRE_EQUAL(ele[1],80);
+	BOOST_REQUIRE_EQUAL(ele[2],80);
+	BOOST_REQUIRE_EQUAL(ele[3],40);
+	BOOST_REQUIRE_EQUAL(ele[4],10);
+	BOOST_REQUIRE_EQUAL(ele[5],1);
+
+
+	// Test SubHypercube 2D and 3D
+
+	std::vector<comb<2>> sc2_0 = HyperCube<2>::getCombinations_R(0);
+
+	for (size_t i = 0 ; i < sc2_0.size() ; i++)
+	{
+		// Expecting one element equal to c2[i]
+		std::vector<comb<2>> combs = SubHyperCube<2,0>::getCombinations_R(sc2_0[i],0);
+		BOOST_REQUIRE_EQUAL(combs.size(),1);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc2_0[i]),true);
+	}
+
+	std::vector<comb<2>> sc2_1 = HyperCube<2>::getCombinations_R(1);
+
+	for (size_t i = 0 ; i < sc2_1.size() ; i++)
+	{
+		// Expecting two elements, valid, distinct,  sub-decomposition of c2[i]
+		std::vector<comb<2>> combs = SubHyperCube<2,1>::getCombinations_R(sc2_1[i],0);
+		BOOST_REQUIRE_EQUAL(combs.size(),2);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc2_1[i]),true);
+
+		// Expecting one element, valid, distinct,  sub-decomposition of c2[i]
+		combs = SubHyperCube<2,1>::getCombinations_R(sc2_1[i],1);
+		BOOST_REQUIRE_EQUAL(combs.size(),1);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc2_1[i]),true);
+	}
+
+	std::vector<comb<2>> sc2_2 = HyperCube<2>::getCombinations_R(2);
+
+	for (size_t i = 0 ; i < sc2_2.size() ; i++)
+	{
+		// Expecting four elements, valid, distinct, sub-decompositions of sc2_2[i]
+		std::vector<comb<2>> combs = SubHyperCube<2,2>::getCombinations_R(sc2_2[i],0);
+		BOOST_REQUIRE_EQUAL(combs.size(),4);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc2_2[i]),true);
+
+		// Expecting four elements, valid, distinct, sub-decompositions of sc2_2[i]
+		combs = SubHyperCube<2,2>::getCombinations_R(sc2_2[i],1);
+		BOOST_REQUIRE_EQUAL(combs.size(),4);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc2_2[i]),true);
+
+		// Expecting one element, valid, distinct,  sub-decomposition of c2[i]
+		combs = SubHyperCube<2,2>::getCombinations_R(sc2_2[i],2);
+		BOOST_REQUIRE_EQUAL(combs.size(),1);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc2_2[i]),true);
+	}
+
+	////////////// 3D ////////////////
+
+	std::vector<comb<3>> sc3_0 = HyperCube<3>::getCombinations_R(0);
+
+	for (size_t i = 0 ; i < sc3_0.size() ; i++)
+	{
+		// Expecting one element equal to sc3[i]
+		std::vector<comb<3>> combs = SubHyperCube<3,0>::getCombinations_R(sc3_0[i],0);
+		BOOST_REQUIRE_EQUAL(combs.size(),1);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc3_0[i]),true);
+	}
+
+	std::vector<comb<3>> sc3_1 = HyperCube<3>::getCombinations_R(1);
+
+	for (size_t i = 0 ; i < sc3_1.size() ; i++)
+	{
+		// Expecting two elements, valid, distinct,  sub-decomposition of sc3[i]
+		std::vector<comb<3>> combs = SubHyperCube<3,1>::getCombinations_R(sc3_1[i],0);
+		BOOST_REQUIRE_EQUAL(combs.size(),2);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc3_1[i]),true);
+
+		// Expecting one element, valid, distinct,  sub-decomposition of c2[i]
+		combs = SubHyperCube<3,1>::getCombinations_R(sc3_1[i],1);
+		BOOST_REQUIRE_EQUAL(combs.size(),1);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc3_1[i]),true);
+	}
+
+	std::vector<comb<3>> sc3_2 = HyperCube<3>::getCombinations_R(2);
+
+	for (size_t i = 0 ; i < sc3_2.size() ; i++)
+	{
+		// Expecting four elements, valid, distinct, sub-decompositions of sc3_2[i]
+		std::vector<comb<3>> combs = SubHyperCube<3,2>::getCombinations_R(sc3_2[i],0);
+		BOOST_REQUIRE_EQUAL(combs.size(),4);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc3_2[i]),true);
+
+		// Expecting four elements, valid, distinct, sub-decompositions of sc3_2[i]
+		combs = SubHyperCube<3,2>::getCombinations_R(sc3_2[i],1);
+		BOOST_REQUIRE_EQUAL(combs.size(),4);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc3_2[i]),true);
+
+		// Expecting one element, valid, distinct,  sub-decomposition of c2[i]
+		combs = SubHyperCube<3,2>::getCombinations_R(sc3_2[i],2);
+		BOOST_REQUIRE_EQUAL(combs.size(),1);
+		BOOST_REQUIRE_EQUAL(isDinstict(combs),true);
+		BOOST_REQUIRE_EQUAL(isValid(combs),true);
+		BOOST_REQUIRE_EQUAL(isSubdecomposition(combs,sc3_2[i]),true);
+	}
+
+/*	comb<5> c0;
+	for (int i = 0 ; i < 5 ; i++)
+		c0.c[i] = 1;
+
+	std::vector<comb<5>> combs_0_0 = SubHyperCube<5,0>::getCombinations_R(c0,0);
+	std::vector<comb<5>> combs_1_0 = SubHyperCube<5,1>::getCombinations_R(c,0);
+	std::vector<comb<5>> combs_1_1 = SubHyperCube<5,1>::getCombinations_R(c,1);
+	std::vector<comb<5>> combs_2_0 = SubHyperCube<5,2>::getCombinations_R(c,0);
+	std::vector<comb<5>> combs_2_1 = SubHyperCube<5,2>::getCombinations_R(c,1);
+	std::vector<comb<5>> combs_2_2 = SubHyperCube<5,2>::getCombinations_R(c,2);
+	std::vector<comb<5>> combs_3_0 = SubHyperCube<5,3>::getCombinations_R(c,0);
+	std::vector<comb<5>> combs_3_1 = SubHyperCube<5,3>::getCombinations_R(c,1);
+	std::vector<comb<5>> combs_3_2 = SubHyperCube<5,3>::getCombinations_R(c,2);
+	std::vector<comb<5>> combs_3_3 = SubHyperCube<5,3>::getCombinations_R(c,3);
+	std::vector<comb<5>> combs_4_0 = SubHyperCube<5,4>::getCombinations_R(c,0);
+	std::vector<comb<5>> combs_4_1 = SubHyperCube<5,4>::getCombinations_R(c,1);
+	std::vector<comb<5>> combs_4_2 = SubHyperCube<5,4>::getCombinations_R(c,2);
+	std::vector<comb<5>> combs_4_3 = SubHyperCube<5,4>::getCombinations_R(c,3);
+	std::vector<comb<5>> combs_4_4 = SubHyperCube<5,4>::getCombinations_R(c,4);
+	std::vector<comb<5>> combs_5_0 = SubHyperCube<5,5>::getCombinations_R(c,0);
+	std::vector<comb<5>> combs_5_1 = SubHyperCube<5,5>::getCombinations_R(c,1);
+	std::vector<comb<5>> combs_5_2 = SubHyperCube<5,5>::getCombinations_R(c,2);
+	std::vector<comb<5>> combs_5_3 = SubHyperCube<5,5>::getCombinations_R(c,3);
+	std::vector<comb<5>> combs_5_4 = SubHyperCube<5,5>::getCombinations_R(c,4);
+	std::vector<comb<5>> combs_5_5 = SubHyperCube<5,5>::getCombinations_R(c,5);*/
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+#endif
diff --git a/src/main.cpp b/src/main.cpp
new file mode 100755
index 00000000..70c9befa
--- /dev/null
+++ b/src/main.cpp
@@ -0,0 +1,21 @@
+#include <iostream>
+#include "Graph/CartesianGraphFactory.hpp"
+
+#define BOOST_DISABLE_ASSERTS
+
+
+#define BOOST_TEST_MODULE "C++ test module for OpenFPM_pdata project"
+#include <boost/test/included/unit_test.hpp>
+
+#include <grid_dist.hpp>
+#include "Point_test.hpp"
+#include "Decomposition/CartDecomposition.hpp"
+#include "memory/HeapMemory.hpp"
+#include "Space/Shape/Box.hpp"
+#include "util.hpp"
+
+#include "hypercube_unit_test.hpp"
+#include "CartesianGraphFactory_unit_test.hpp"
+#include "metis_util_unit_test.hpp"
+#include "dec_optimizer_unit_test.hpp"
+
diff --git a/src/metis_util.hpp b/src/metis_util.hpp
new file mode 100644
index 00000000..0ea26f6d
--- /dev/null
+++ b/src/metis_util.hpp
@@ -0,0 +1,306 @@
+/*
+ * metis_util.hpp
+ *
+ *  Created on: Nov 21, 2014
+ *      Author: Pietro Incardona
+ */
+
+#ifndef METIS_UTIL_HPP
+#define METIS_UTIL_HPP
+
+#include <iostream>
+#include "metis.h"
+#include "VTKWriter.hpp"
+
+/*! \brief Metis graph structure
+ *
+ * Metis graph structure
+ *
+ */
+struct Metis_graph
+{
+	//! The number of vertices in the graph
+	idx_t * nvtxs;
+
+	//! number of balancing constraints
+	//! more practically, it is the number of weights for each vertex
+	//! PS even if you specify vwgt == NULL ncon must be set at least to
+	//! one
+	idx_t * ncon;
+
+	//! For each vertex it stores the adjacency list start for the vertex i
+	idx_t * xadj;
+
+	//! For each vertex it stores the list of all the neighborhood vertices
+	idx_t * adjncy;
+
+	//! Array that stores the weight for each vertex
+	idx_t * vwgt;
+
+	//! Array of the vertex sizes, basically it is the total communication amount
+	idx_t * vsize;
+	//! The weight of the edge
+	idx_t * adjwgt;
+	//! number of parts to partition the graph into
+	idx_t * nparts;
+	//! Desired weight for each partition (one for each constraint)
+	real_t * tpwgts;
+	//! For each partition the load imbalance tolerated
+	real_t * ubvec;
+	//! Additional options for the graph partitioning
+	idx_t * options;
+	//! returns the total communication cost for each partition
+	idx_t * objval;
+	//! Is an output vector containing the partition for each vertex
+	idx_t * part;
+};
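+
+/* Illustrative example of the CSR adjacency layout expected by METIS (this is
+ * the general METIS convention, stated here only as a reminder): for the path
+ * graph 0-1-2
+ *
+ *   xadj   = {0, 1, 3, 4}   // neighbours of vertex i are adjncy[xadj[i]..xadj[i+1])
+ *   adjncy = {1, 0, 2, 1}
+ */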
+
+//! Balance communication and computation
+#define BALANCE_CC 1
+//! Balance communication computation and memory
+#define BALANCE_CCM 2
+//! Balance computation and communication and others
+#define BALANCE_CC_O(c) ((c)+1)
+
+/*! \brief Helper class to define Metis graph
+ *
+ * \tparam graph structure that store the graph
+ *
+ */
+
+template<typename Graph>
+class Metis
+{
+	// Graph in metis representation
+	Metis_graph Mg;
+
+	// Original graph
+	Graph & g;
+
+	/*! \brief Construct Adjacency list
+	 *
+	 * \param g Graph
+	 *
+	 */
+	void constructAdjList(Graph & g)
+	{
+		// create xadj and adjlist
+
+		Mg.xadj = new idx_t[g.getNVertex()+1];
+		Mg.adjncy = new idx_t[g.getNEdge()];
+
+		//! Get a vertex iterator
+		auto it = g.getVertexIterator();
+
+		//! starting point in the adjacency list
+		size_t prev = 0;
+
+		// actual position
+		size_t id = 0;
+
+		// for each vertex calculate the position of the starting point in the adjacency list
+		while (it.isNext())
+		{
+			// Calculate the starting point in the adjacency list
+			Mg.xadj[id] = prev;
+
+			// Create the adjacency list
+			for (size_t s = 0 ; s < g.getNChilds(it) ; s++)
+			{Mg.adjncy[prev+s] = g.getChild(it,s);}
+
+			// update the position for the next vertex
+			prev += g.getNChilds(it);
+
+			++it;
+			id++;
+		}
+
+		// Fill the last point
+		Mg.xadj[id] = prev;
+	}
+
+public:
+
+	/*! \brief Constructor
+	 *
+	 * Construct a metis graph from Graph_CSR
+	 *
+	 * \param g Graph we want to convert and decompose
+	 * \param nc number of partitions
+	 *
+	 */
+	Metis(Graph & g, size_t nc)
+	:g(g)
+	{
+		// Get the number of vertex
+
+		Mg.nvtxs = new idx_t[1];
+		Mg.nvtxs[0] = g.getNVertex();
+
+		// Set the number of constrains
+
+		Mg.ncon = new idx_t[1];
+		Mg.ncon[0] = 1;
+
+		// construct the adjacency list
+		constructAdjList(g);
+
+		// Set to null the weight of the vertex
+
+		Mg.vwgt = NULL;
+
+		// Put the total communication size to NULL
+
+		Mg.vsize = NULL;
+
+		// Set to null the weight of the edge
+
+		Mg.adjwgt = NULL;
+
+		// Set the total number of partitions
+
+		Mg.nparts = new idx_t[1];
+		Mg.nparts[0] = nc;
+
+		// Set to null the desired weight for each partition (one for each constrain)
+
+		Mg.tpwgts = NULL;
+
+		//! Set to null the partition load imbalance tolerance
+
+		Mg.ubvec = NULL;
+
+		//! Set to null the additional options for the graph partitioning
+
+		Mg.options = NULL;
+
+		//! set the objective value
+		Mg.objval = new idx_t[1];
+
+		//! Is an output vector containing the partition for each vertex
+		Mg.part = new idx_t[g.getNVertex()];
+	}
+
+	/*! \brief destructor
+	 *
+	 * Destructor, It destroy all the memory allocated
+	 *
+	 */
+	~Metis()
+	{
+		// Deallocate the Mg structure
+		if (Mg.nvtxs != NULL)
+		{
+			delete [] Mg.nvtxs;
+		}
+
+		if (Mg.ncon != NULL)
+		{
+			delete [] Mg.ncon;
+		}
+
+		if (Mg.xadj != NULL)
+		{
+			delete [] Mg.xadj;
+		}
+
+		if (Mg.adjncy != NULL)
+		{
+			delete [] Mg.adjncy;
+		}
+		if (Mg.vwgt != NULL)
+		{
+			delete [] Mg.vwgt;
+		}
+		if (Mg.adjwgt != NULL)
+		{
+			delete [] Mg.adjwgt;
+		}
+		if (Mg.nparts != NULL)
+		{
+			delete [] Mg.nparts;
+		}
+		if (Mg.tpwgts != NULL)
+		{
+			delete [] Mg.tpwgts;
+		}
+		if (Mg.ubvec != NULL)
+		{
+			delete [] Mg.ubvec;
+		}
+		if (Mg.options != NULL)
+		{
+			delete [] Mg.options;
+		}
+		if (Mg.objval != NULL)
+		{
+			delete [] Mg.objval;
+		}
+		if (Mg.part != NULL)
+		{
+			delete [] Mg.part;
+		}
+	}
+
+	/*! \brief Decompose the graph
+	 *
+	 * \tparam i which property stores the decomposition
+	 *
+	 */
+
+	template<unsigned int i>
+	void decompose()
+	{
+		// Decompose
+		METIS_PartGraphKway(Mg.nvtxs,Mg.ncon,Mg.xadj,Mg.adjncy,Mg.vwgt,Mg.vsize,Mg.adjwgt,
+				            Mg.nparts,Mg.tpwgts,Mg.ubvec,Mg.options,Mg.objval,Mg.part);
+
+		// vertex id
+
+		size_t id = 0;
+
+		// For each vertex store the processor that contains the data
+
+		auto it = g.getVertexIterator();
+
+		while (it.isNext())
+		{
+			g.vertex(it).template get<i>() = Mg.part[id];
+
+			++id;
+			++it;
+		}
+	}
+
+	/*! \brief Decompose the graph
+	 *
+	 * \tparam i which property stores the decomposition
+	 *
+	 */
+
+	template<unsigned int i, typename Graph_part>
+	void decompose(Graph_part & gp)
+	{
+		// Decompose
+		METIS_PartGraphKway(Mg.nvtxs,Mg.ncon,Mg.xadj,Mg.adjncy,Mg.vwgt,Mg.vsize,Mg.adjwgt,
+				            Mg.nparts,Mg.tpwgts,Mg.ubvec,Mg.options,Mg.objval,Mg.part);
+
+		// vertex id
+
+		size_t id = 0;
+
+		// For each vertex store the processor that contains the data
+
+		auto it = gp.getVertexIterator();
+
+		while (it.isNext())
+		{
+			gp.vertex(it).template get<i>() = Mg.part[id];
+
+			++id;
+			++it;
+		}
+	}
+};
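+
+/* Illustrative usage sketch (taken from metis_util_unit_test.hpp in this
+ * patch): convert a Graph_CSR into the Metis representation and partition it
+ * into 16 parts, writing the result either into a second "partition" graph or
+ * into the original graph itself.
+ *
+ * \code
+ * Metis<Graph_CSR<nm_v,nm_e>> met(g,16);
+ * met.decompose<nm_part_v::id>(gp); // store the partition in graph gp
+ * met.decompose<nm_v::id>();        // or store it in g itself
+ * \endcode
+ */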
+
+#endif
diff --git a/src/metis_util_unit_test.hpp b/src/metis_util_unit_test.hpp
new file mode 100644
index 00000000..dc5442b6
--- /dev/null
+++ b/src/metis_util_unit_test.hpp
@@ -0,0 +1,186 @@
+/*
+ * metis_util_unit_test.hpp
+ *
+ *  Created on: Dec 21, 2014
+ *      Author: i-bird
+ */
+
+#ifndef METIS_UTIL_UNIT_TEST_HPP_
+#define METIS_UTIL_UNIT_TEST_HPP_
+
+#include "Graph/CartesianGraphFactory.hpp"
+#include "map_graph.hpp"
+#include "metis_util.hpp"
+
+#undef GS_SIZE
+#define GS_SIZE 8
+
+/*!
+ *
+ * Test node
+ *
+ */
+
+struct nm_v
+{
+	//! The node contains three floats for the position and five unsigned integers: communication, computation, memory, id and sub_id
+	typedef boost::fusion::vector<float,float,float,size_t,size_t,size_t,size_t,size_t> type;
+
+	typedef typename memory_traits_inte<type>::type memory_int;
+	typedef typename memory_traits_lin<type>::type memory_lin;
+
+	//! type of the positional field
+	typedef float s_type;
+
+	//! Attributes name
+	struct attributes
+	{
+		static const std::string name[];
+	};
+
+	//! The data
+	type data;
+
+	//! x position property id in boost::fusion::vector
+	static const unsigned int x = 0;
+	//! y position property id in boost::fusion::vector
+	static const unsigned int y = 1;
+	//! z position property id in boost::fusion::vector
+	static const unsigned int z = 2;
+	//! communication property id in boost::fusion::vector
+	static const unsigned int communication = 3;
+	//! computation property id in boost::fusion::vector
+	static const unsigned int computation = 4;
+	//! memory property id in boost::fusion::vector
+	static const unsigned int memory = 5;
+	//! id property id in boost::fusion::vector
+	static const unsigned int id = 6;
+	//! sub_id property id in boost::fusion::vector
+	static const unsigned int sub_id = 7;
+
+	//! total number of properties boost::fusion::vector
+	static const unsigned int max_prop = 8;
+};
+
+const std::string nm_v::attributes::name[] = {"x","y","z","communication","computation","memory","id","sub_id"};
+
+/*!
+ *
+ * Test node
+ *
+ */
+
+struct nm_e
+{
+	//! The edge contains one unsigned long integer: the communication cost
+	typedef boost::fusion::vector<size_t> type;
+
+	typedef typename memory_traits_inte<type>::type memory_int;
+	typedef typename memory_traits_lin<type>::type memory_lin;
+
+	//! Attributes name
+	struct attributes
+	{
+		static const std::string name[];
+	};
+
+	//! The data
+	type data;
+
+	//! computation property id in boost::fusion::vector
+	static const unsigned int communication = 0;
+	//! total number of properties boost::fusion::vector
+	static const unsigned int max_prop = 1;
+};
+
+const std::string nm_e::attributes::name[] = {"communication"};
+
+struct nm_part_v
+{
+	//! The node contains one unsigned long integer: the partition id
+	typedef boost::fusion::vector<size_t> type;
+
+	typedef typename memory_traits_inte<type>::type memory_int;
+	typedef typename memory_traits_lin<type>::type memory_lin;
+
+	typedef float s_type;
+
+	//! Attributes name
+	struct attributes
+	{
+		static const std::string name[];
+	};
+
+	//! The data
+
+	type data;
+
+	//! partition id in the boost::fusion::vector
+	static const unsigned int id = 0;
+
+	//! total number of properties
+	static const unsigned int max_prop = 1;
+};
+
+const std::string nm_part_v::attributes::name[] = {"id"};
+
+struct nm_part_e
+{
+	//! The edge contains no properties
+	typedef boost::fusion::vector<> type;
+
+	typedef typename memory_traits_inte<type>::type memory_int;
+	typedef typename memory_traits_lin<type>::type memory_lin;
+
+	//! The data
+
+	type data;
+
+	//! total number of properties
+	static const unsigned int max_prop = 0;
+};
+
+BOOST_AUTO_TEST_SUITE( Metis_test )
+
+BOOST_AUTO_TEST_CASE( Metis_test_use)
+{
+	CartesianGraphFactory<3,Graph_CSR<nm_v,nm_e>> g_factory;
+	CartesianGraphFactory<3,Graph_CSR<nm_part_v,nm_part_e>> g_factory_part;
+
+	// Cartesian grid
+	std::vector<size_t> sz;
+	sz.push_back(GS_SIZE);
+	sz.push_back(GS_SIZE);
+	sz.push_back(GS_SIZE);
+
+	// Box
+	Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});
+
+	// Graph to decompose
+
+	Graph_CSR<nm_v,nm_e> g = g_factory.construct<nm_e::communication,float,2,0,1,2>(sz,box);
+
+	// Processor graph
+
+	Graph_CSR<nm_part_v,nm_part_e> gp = g_factory_part.construct<NO_EDGE,float,2>(sz,box);
+
+	// Convert the graph to metis
+
+	Metis<Graph_CSR<nm_v,nm_e>> met(g,16);
+
+	// decompose
+
+	met.decompose<nm_part_v::id>(gp);
+	met.decompose<nm_v::id>();
+
+	// Write the VTK file
+
+	VTKWriter<Graph_CSR<nm_part_v,nm_part_e>> vtk(gp);
+	vtk.write("vtk_partition.vtk");
+	VTKWriter<Graph_CSR<nm_v,nm_e>> vtk2(g);
+	vtk2.write("vtk_partition2.vtk");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+#endif
-- 
GitLab