diff --git a/src/Grid/grid_dist_id.hpp b/src/Grid/grid_dist_id.hpp
index 9b109c73ab1e249cdb5cb965c8cf98f00242d596..8d4501c86ce162f7b275f78c46fa6a1d79811dc4 100644
--- a/src/Grid/grid_dist_id.hpp
+++ b/src/Grid/grid_dist_id.hpp
@@ -1691,23 +1691,42 @@ public:
 	void map()
 	{
 		getGlobalGridsInfo(gdb_ext_global);
-
-		std::cout << "Global size: " << gdb_ext_global.size() << std::endl;
 /*
+		std::cout << "Global size: " << gdb_ext_global.size() << std::endl;
+
 		for (size_t i = 0; i < gdb_ext_global.size(); i++)
 		{
 			std::cout << "(" << gdb_ext_global.get(i).Dbox.getLow(0) << "; " << gdb_ext_global.get(i).Dbox.getLow(1) << "); (" << gdb_ext_global.get(i).Dbox.getHigh(0) << "; " << gdb_ext_global.get(i).Dbox.getHigh(1) << ")" << std::endl;
 			std::cout << "I = " << i << ", Origin is (" << gdb_ext_global.get(i).origin.get(0) << "; " << gdb_ext_global.get(i).origin.get(1) << ")" << std::endl;
 		}
-*/
-		for (size_t i = 0; i < gdb_ext.size(); i++)
+
+		if (v_cl.getProcessUnitID() == 0)
 		{
-			Box<dim,long int> box = gdb_ext.get(i).Dbox;
-			box += gdb_ext.get(i).origin;
-			std::cout << "(" << box.getLow(0) << "; " << box.getLow(1) << "); (" << box.getHigh(0) << "; " << box.getHigh(1) << ")" << std::endl;
+			for (size_t i = 0; i < gdb_ext.size(); i++)
+			{
+				Box<dim,long int> box = gdb_ext.get(i).Dbox;
+				box += gdb_ext.get(i).origin;
+				std::cout << "(" << box.getLow(0) << "; " << box.getLow(1) << "); (" << box.getHigh(0) << "; " << box.getHigh(1) << ")" << std::endl;
+			}
 		}
 
-		this->template map_(domain,dec,cd_sm,loc_grid,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global);
+		if (v_cl.getProcessUnitID() == 0)
+		{
+			for (size_t i = 0; i < loc_grid_old.size(); i++)
+			{
+				Point<dim,St> p1;
+				Point<dim,St> p2;
+				for (size_t n = 0; n < dim; n++)
+				{
+					p1.get(n) = loc_grid_old.get(i).getGrid().getBox().getLow(n);
+					p2.get(n) = loc_grid_old.get(i).getGrid().getBox().getHigh(n);
+				}
+
+				std::cout << "Loc_grid_old: (" << p1.get(0) << "; " << p1.get(1) << "); (" << p2.get(0) << "; " << p2.get(1) << "); " << "Gdb_ext_old: (" << gdb_ext_old.get(i).Dbox.getLow(0) << "; " << gdb_ext_old.get(i).Dbox.getLow(1) << "); (" << gdb_ext_old.get(i).Dbox.getHigh(0) << "; " << gdb_ext_old.get(i).Dbox.getHigh(1) << ")" << std::endl;
+			}
+		}
+*/
+		this->template map_(dec,cd_sm,loc_grid,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global);
 	}
 
 	void gdb_ext_info()
@@ -1857,6 +1876,16 @@ public:
 		//Write a data set 2 to a file
 		H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
 
+		for (size_t i = 0; i < gdb_ext.size(); i++)
+		{
+			Box<dim,long int> box = gdb_ext.get(i).Dbox;
+			std::cout << "Dboxes saved: (" << box.getLow(0) << "; " << box.getLow(1) << "); (" << box.getHigh(0) << "; " << box.getHigh(1) << ")" << std::endl;
+		}
+
+		for (size_t i = 0; i < loc_grid.size(); i++)
+		{
+			std::cout << "loc_grids saved: (" << loc_grid.get(i).getGrid().getBox().getLow(0) << "; " << loc_grid.get(i).getGrid().getBox().getLow(1) << "); (" << loc_grid.get(i).getGrid().getBox().getHigh(0) << "; " << loc_grid.get(i).getGrid().getBox().getHigh(1) << ")" << std::endl;
+		}
 
 	    //Close/release resources.
 	    H5Dclose(file_dataset);
@@ -1868,86 +1897,9 @@ public:
 	    H5Fclose(file);
 	}
 
-	inline void load(const std::string & filename)
+	void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> & metadata_accum, hid_t plist_id, hid_t dataset_2)
 	{
-		MPI_Comm comm = v_cl.getMPIComm();
-		MPI_Info info  = MPI_INFO_NULL;
-
-		int mpi_rank = v_cl.getProcessUnitID();
-		int mpi_size = v_cl.getProcessingUnits();
-
-		// Set up file access property list with parallel I/O access
-		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
-		H5Pset_fapl_mpio(plist_id, comm, info);
-
-		//Open a file
-	    hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
-	    H5Pclose(plist_id);
-
-	    //Open dataset
-	    hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
-
-	    //Create property list for collective dataset read
-	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
-	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-		//Select file dataspace
-		hid_t file_dataspace_id = H5Dget_space(dataset);
-
-		hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
-
-		//if (mpi_rank == 0)
-			//printf ("\nOld MPI size: %llu\n", mpi_size_old);
-
-	  	//Where to read metadata
-	  	int metadata_out[mpi_size_old];
-
-	  	for (int i = 0; i < mpi_size_old; i++)
-	  	{
-	  		metadata_out[i] = 0;
-	  	}
-
-		//Size for data space in memory
-		hsize_t mdim[1] = {(size_t)mpi_size_old};
-
-		//Create data space in memory
-		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
-
-
-		if (mpi_rank == 0)
-		{
-			hssize_t size;
-
-			size = H5Sget_select_npoints (mem_dataspace_id);
-			printf ("\nmemspace_id size: %llu\n", size);
-			size = H5Sget_select_npoints (file_dataspace_id);
-			printf ("dataspace_id size: %llu\n", size);
-		}
-
-	  	// Read the dataset.
-	    H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
-
-		if (mpi_rank == 0)
-		{
-			std::cout << "Metadata_out[]: ";
-			for (int i = 0; i < mpi_size_old; i++)
-			{
-				std::cout << metadata_out[i] << " ";
-			}
-			std::cout << " " << std::endl;
-		}
-
-	    //Open dataset
-	    hid_t dataset_2 = H5Dopen (file, "grid_dist", H5P_DEFAULT);
-
-	    //Create property list for collective dataset read
-	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
-	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-	  	hsize_t block[1] = {0};
-	  	hsize_t block_add[1] = {0};
-
-	  	if (mpi_size >= mpi_size_old)
+/*	  	if (mpi_size >= mpi_size_old)
 	  	{
 			if (mpi_rank >= mpi_size_old)
 				block[0] = 0;
@@ -1960,18 +1912,38 @@ public:
 	  		int shift = mpi_rank*x;
 	  		for (int i = 0; i < x; i++)
 	  		{
+	  			//block0.get(mpi_rank).add(metadata_out[shift]);
 	  			block[0] += metadata_out[shift];
 	  			shift++;
 	  		}
 	  		int y = mpi_size_old%mpi_size;
 	  		if (mpi_rank < y)
+	  		{
 				block_add[0] += metadata_out[mpi_size*x+mpi_rank];
-	  	}
+				//block_add0.get(mpi_rank).add(metadata_out[mpi_size*x+mpi_rank]);
+	  		}
+	  	}*/
+
+		std::cout << "BID: " << bid << std::endl;
+
+		hsize_t offset[1];
+		hsize_t block[1];
+
+		if (bid < mpi_size_old && bid != -1)
+		{
+			offset[0] = metadata_accum.get(bid);
+			block[0] = metadata_out[bid];
+		}
+		else
+		{
+			offset[0] = 0;
+			block[0] = 0;
+		}
 
-	  	hsize_t offset[1] = {0};
-	    hsize_t offset_add[1] = {0};
+		std::cout << "Offset: " << offset[0] << "; Block: " << block[0]<<  std::endl;
+//	    hsize_t offset_add[1] = {0};
 
-	    if (mpi_size >= mpi_size_old)
+/*	    if (mpi_size >= mpi_size_old)
 		{
 			if (mpi_rank >= mpi_size_old)
 				offset[0] = 0;
@@ -1989,21 +1961,35 @@ public:
 	  		for (int i = 0; i < shift; i++)
 	  		{
 	  			offset[0] += metadata_out[i];
+	  			//offset0.get(mpi_rank).add(metadata_out[i]);
 	  		}
 
 	  		int y = mpi_size_old%mpi_size;
 	  		if (mpi_rank < y)
 	  		{
 	  			for (int i = 0; i < mpi_size*x + mpi_rank; i++)
+	  			{
 	  				offset_add[0] += metadata_out[i];
+	  				//offset_add0.get(mpi_rank).add(metadata_out[i]);
+	  			}
 	  		}
-	    }
+	    }*/
 
 	    //hsize_t stride[1] = {1};
 	    hsize_t count[1] = {1};
 
-	    std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
-
+	    //std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
+/*
+	    std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << std::endl;
+	    for (size_t i = 0; i < offset0.get(mpi_rank).size(); i++)
+	    	std::cout << ", Offset: " << offset0.get(mpi_rank).get(i) << std::endl;
+		for (size_t i = 0; i < offset_add0.get(mpi_rank).size(); i++)
+			std::cout << ", Offset_add: " << offset_add0.get(mpi_rank).get(i) << std::endl;
+		for (size_t i = 0; i < block0.get(mpi_rank).size(); i++)
+			std::cout << ", Block: " << block0.get(mpi_rank).get(i) << std::endl;
+		for (size_t i = 0; i < block_add0.get(mpi_rank).size(); i++)
+			std::cout << ", Block_add: " << block_add0.get(mpi_rank).get(i) << std::endl;
+*/
 
 		//Select file dataspace
 		hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);
@@ -2011,18 +1997,26 @@ public:
         H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);
 
 		//Select file dataspace
-		hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
+/*		hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
 
-        H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);
+        H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);*/
 
         hsize_t mdim_2[1] = {block[0]};
-        hsize_t mdim_3[1] = {block_add[0]};
+//        hsize_t mdim_3[1] = {block_add[0]};
+
+
+		//Size for data space in memory
+
+		/*if (mpi_rank >= mpi_size_old)
+			mdim_2[0] = 0;
+		else
+			mdim_2[0] = metadata_out[mpi_rank];*/
 
 		//Create data space in memory
 		hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
-		hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
+//		hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
 
-		if (mpi_rank == 0)
+		//if (mpi_rank == 0)
 		{
 			hssize_t size2;
 
@@ -2031,7 +2025,7 @@ public:
 			size2 = H5Sget_select_npoints (file_dataspace_id_2);
 			printf ("LOAD: dataspace_id_2 size: %llu\n", size2);
 		}
-
+/*
 		if (mpi_rank == 0)
 		{
 			hssize_t size2;
@@ -2041,65 +2035,210 @@ public:
 			size2 = H5Sget_select_npoints (file_dataspace_id_3);
 			printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
 		}
-
-		size_t sum = 0;
+*/
+		size_t sum = 0;
 
 		for (int i = 0; i < mpi_size_old; i++)
 		{
 			sum += metadata_out[i];
 		}
 
-
 		std::cout << "LOAD: sum: " << sum << std::endl;
 
 		// allocate the memory
 		HeapMemory pmem;
-		HeapMemory pmem2;
+//		HeapMemory pmem2;
 		//pmem.allocate(req);
 		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
 		mem.incRef();
-		ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
-		mem2.incRef();
+//		ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
+//		mem2.incRef();
 
 	  	// Read the dataset.
 	    H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
 
 	    // Read the dataset.
-		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
+//		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
 
 		mem.allocate(pmem.size());
-		mem2.allocate(pmem2.size());
-		std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl;
+//		mem2.allocate(pmem2.size());
+		std::cout << "Mem.size(): " << mem.size() << " = " << block[0] << std::endl;
 
 		Unpack_stat ps;
 
-		Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem,loc_grid_old,ps);
-		Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem,gdb_ext_old,ps);
-
-		Unpack_stat ps2;
-
-
 		openfpm::vector<device_grid> loc_grid_old_unp;
 		openfpm::vector<GBoxes<device_grid::dims>> gdb_ext_old_unp;
 
-		Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem2,loc_grid_old_unp,ps2);
-		Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem2,gdb_ext_old_unp,ps2);
-
+		Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem,loc_grid_old_unp,ps,1);
+		Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem,gdb_ext_old_unp,ps,1);
+/*
 		std::cout << "Loc_grid_old.size() before merge: " << loc_grid_old.size() << std::endl;
 		std::cout << "Gdb_ext_old.size() before merge: " << gdb_ext_old.size() << std::endl;
 
 		std::cout << "Loc_grid_old_unp.size() before merge: " << loc_grid_old_unp.size() << std::endl;
 		std::cout << "Gdb_ext_old_unp.size() before merge: " << gdb_ext_old_unp.size() << std::endl;
-
+*/
 		for (size_t i = 0; i < loc_grid_old_unp.size(); i++)
 			loc_grid_old.add(loc_grid_old_unp.get(i));
 
 		for (size_t i = 0; i < gdb_ext_old_unp.size(); i++)
 			gdb_ext_old.add(gdb_ext_old_unp.get(i));
 
-		std::cout << "Loc_grid_old.size() after merge: " << loc_grid_old.size() << std::endl;
-		std::cout << "Gdb_ext_old.size() after merge: " << gdb_ext_old.size() << std::endl;
-		std::cout << "*********************************" << std::endl;
+//		std::cout << "Loc_grid_old.size() after merge: " << loc_grid_old.size() << std::endl;
+//		std::cout << "Gdb_ext_old.size() after merge: " << gdb_ext_old.size() << std::endl;
+//		std::cout << "*********************************" << std::endl;
+
+		mem.decRef();
+		delete &mem;
+
+	}
+
+	inline void load(const std::string & filename)
+	{
+		MPI_Comm comm = v_cl.getMPIComm();
+		MPI_Info info  = MPI_INFO_NULL;
+
+		int mpi_rank = v_cl.getProcessUnitID();
+		//int mpi_size = v_cl.getProcessingUnits();
+
+		// Set up file access property list with parallel I/O access
+		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+		H5Pset_fapl_mpio(plist_id, comm, info);
+
+		//Open a file
+	    hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
+	    H5Pclose(plist_id);
+
+	    //Open dataset
+	    hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
+
+	    //Create property list for collective dataset read
+	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
+	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+		//Select file dataspace
+		hid_t file_dataspace_id = H5Dget_space(dataset);
+
+		hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
+
+		//if (mpi_rank == 0)
+			//printf ("\nOld MPI size: %llu\n", mpi_size_old);
+
+	  	//Where to read metadata
+	  	int metadata_out[mpi_size_old];
+
+	  	for (int i = 0; i < mpi_size_old; i++)
+	  	{
+	  		metadata_out[i] = 0;
+	  	}
+
+		//Size for data space in memory
+		hsize_t mdim[1] = {(size_t)mpi_size_old};
+
+		//Create data space in memory
+		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
+
+/*
+		if (mpi_rank == 0)
+		{
+			hssize_t size;
+
+			size = H5Sget_select_npoints (mem_dataspace_id);
+			printf ("\nmemspace_id size: %llu\n", size);
+			size = H5Sget_select_npoints (file_dataspace_id);
+			printf ("dataspace_id size: %llu\n", size);
+		}
+*/
+	  	// Read the dataset.
+	    H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
+/*
+		if (mpi_rank == 0)
+		{
+			std::cout << "Metadata_out[]: ";
+			for (int i = 0; i < mpi_size_old; i++)
+			{
+				std::cout << metadata_out[i] << " ";
+			}
+			std::cout << " " << std::endl;
+		}
+*/
+
+	    openfpm::vector<size_t> metadata_accum;
+	    metadata_accum.resize(mpi_size_old);
+
+	    metadata_accum.get(0) = 0;
+	    for (int i = 1 ; i < mpi_size_old ; i++)
+	    	metadata_accum.get(i) = metadata_accum.get(i-1) + metadata_out[i-1];
+
+	    //Open dataset
+	    hid_t dataset_2 = H5Dopen (file, "grid_dist", H5P_DEFAULT);
+
+	    //Create property list for collective dataset read
+	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
+	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+	  	/////////////////////////////////////
+
+	  	openfpm::vector<size_t> n_block;
+	  	n_block.resize(v_cl.getProcessingUnits());
+
+
+	  	for(size_t i = 0 ; i < n_block.size() ; i++)
+	  		n_block.get(i) = mpi_size_old / v_cl.getProcessingUnits();
+
+	  	size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();
+
+	  	std::cout << "MPI size old: " << mpi_size_old << std::endl;
+	  	std::cout << "MPI size: " << v_cl.getProcessingUnits() << std::endl;
+
+
+	  	std::cout << "Rest block: " << rest_block << std::endl;
+
+	  	size_t max_block;
+
+	  	if (rest_block != 0)
+	  		max_block = n_block.get(0) + 1;
+	  	else
+	  		max_block = n_block.get(0);
+
+	  	//for(size_t i = 0 ; i < n_block.size() ; i++)
+	  	for(size_t i = 0 ; i < rest_block ; i++)
+	  		n_block.get(i) += 1;
+
+
+	  	for(size_t i = 0 ; i < n_block.size() ; i++)
+	  		std::cout << "n_block.get(i): " << n_block.get(i) << std::endl;
+
+	  	size_t start_block = 0;
+	  	size_t stop_block = 0;
+
+
+	  	if (v_cl.getProcessUnitID() != 0)
+	  	{
+			for(size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
+				start_block += n_block.get(i);
+	  	}
+
+	  	stop_block = start_block + n_block.get(v_cl.getProcessUnitID());
+
+	  	std::cout << "ID: " << v_cl.getProcessUnitID() << "; Start block: " << start_block << "; " << "Stop block: " << stop_block << std::endl;
+
+	  	if (mpi_rank >= mpi_size_old)
+	  		load_block(start_block,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
+	  	else
+	  	{
+	  		size_t n_bl = 0;
+	  		size_t lb = start_block;
+			for ( ; lb < stop_block ; lb++, n_bl++)
+				load_block(lb,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
+
+			if (n_bl < max_block)
+				load_block(-1,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
+	  	}
+
+	  	////////////////////////////////////
+
+		//std::cout << "LOAD: sum: " << sum << std::endl;
 
 	    // Close the dataset.
 	    H5Dclose(dataset);
@@ -2108,11 +2247,13 @@ public:
 	    H5Fclose(file);
 	    H5Pclose(plist_id);
 
-		mem.decRef();
-		delete &mem;
-
 		// Map the distributed grid
 		map();
+
+		for (size_t i = 0; i < loc_grid.size(); i++)
+		{
+			std::cout << "loc_grids loaded: (" << loc_grid.get(i).getGrid().getBox().getLow(0) << "; " << loc_grid.get(i).getGrid().getBox().getLow(1) << "); (" << loc_grid.get(i).getGrid().getBox().getHigh(0) << "; " << loc_grid.get(i).getGrid().getBox().getHigh(1) << ")" << std::endl;
+		}
 	}
 
 	//! Define friend classes
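
The restart path above no longer assumes one reader per saved rank. load() splits the mpi_size_old blocks written at save time evenly across the current processors, hands the first mpi_size_old % P ranks one extra block, and pads every rank up to the same maximum number of load_block() calls with a dummy bid of -1, so the collective HDF5 reads stay matched across ranks. A minimal standalone sketch of that partitioning, with illustrative names that are not part of the OpenFPM API:

#include <cstddef>
#include <iostream>
#include <vector>

// Mirror of the block scheduling in load(): which saved blocks does a rank read?
void plan_blocks(std::size_t n_ranks, std::size_t mpi_size_old, std::size_t rank)
{
	std::vector<std::size_t> n_block(n_ranks, mpi_size_old / n_ranks);
	std::size_t rest_block = mpi_size_old % n_ranks;

	// the largest number of reads any rank performs
	std::size_t max_block = n_block[0] + (rest_block != 0 ? 1 : 0);

	// the first rest_block ranks absorb the remainder
	for (std::size_t i = 0; i < rest_block; i++)
		n_block[i] += 1;

	std::size_t start_block = 0;
	for (std::size_t i = 0; i < rank; i++)
		start_block += n_block[i];
	std::size_t stop_block = start_block + n_block[rank];

	for (std::size_t bid = start_block; bid < stop_block; bid++)
		std::cout << "rank " << rank << " reads block " << bid << "\n";

	// pad with an empty read so every rank issues max_block collective calls
	if (n_block[rank] < max_block)
		std::cout << "rank " << rank << " issues a dummy read (bid = -1)\n";
}

int main()
{
	for (std::size_t r = 0; r < 3; r++)
		plan_blocks(3, 7, r); // e.g. 7 saved blocks restored on 3 ranks
}
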
diff --git a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
index 67379b07fa76a395da76d1745aba85e8ef56f77b..14e7ee7aee3bf4b11592e0b83ed12a890faa5214 100644
--- a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
+++ b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
@@ -18,7 +18,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
 	// Input data
 	size_t k = 10;
 
-	size_t ghost_part = 0.2;
+	float ghost_part = 0.0;
 
 	// Domain
 	Box<2,float> domain({0.0,0.0},{1.0,1.0});
@@ -50,6 +50,36 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
 	bool val = dec.check_consistency();
 	BOOST_REQUIRE_EQUAL(val,true);
 
+	size_t count = 0;
+
+	auto it = g_dist.getDomainIterator();
+
+	while (it.isNext())
+	{
+		//key
+		auto key = it.get();
+
+		auto keyg = g_dist.getGKey(key);
+
+		g_dist.template get<0>(key) = keyg.get(0);
+
+		++it;
+		count++;
+	}
+
+	std::cout << "Count: " << count << std::endl;
+
+	openfpm::vector<size_t> count_total;
+	v_cl.allGather(count,count_total);
+	v_cl.execute();
+
+	size_t sum = 0;
+
+	for (size_t i = 0; i < count_total.size(); i++)
+		sum += count_total.get(i);
+
+	std::cout << "Sum: " << sum << std::endl;
+
 	timer t;
 	t.start();
 	// Save the grid
@@ -65,7 +95,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
 	// Input data
 	size_t k = 10;
 
-	size_t ghost_part = 0.2;
+	float ghost_part = 0.0;
 
 	// Domain
 	Box<2,float> domain({0.0,0.0},{1.0,1.0});
@@ -77,7 +107,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
 		return;
 
 	if (v_cl.getProcessUnitID() == 0)
-			std::cout << "Loading Distributed 2D Grid..." << std::endl;
+		std::cout << "Loading Distributed 2D Grid..." << std::endl;
 
 	// grid size
 	size_t sz[2];
@@ -90,29 +120,42 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
 	// Distributed grid with id decomposition
 	grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
 
+	g_dist.getDecomposition().write("Before_load_grid_decomposition");
+	g_dist.write("Before_Loaded_grid");
+
 	timer t;
 	t.start();
 	// Load the grid
 	g_dist.load("grid_dist_id.h5");
 	t.stop();
 
+	g_dist.write("Loaded_grid");
+	g_dist.getDecomposition().write("Loaded_grid_decomposition");
+
 	std::cout << "Loading time: " << t.getwct() << std::endl;
 
-	auto it = g_dist.getOldDomainIterator();
+	auto it = g_dist.getDomainIterator();
 
 	size_t count = 0;
 
 	while (it.isNext())
 	{
 		//key
-		grid_dist_key_dx<2> key = it.get();
+		auto key = it.get();
 
-		//g_dist.get(key);
+		//BOOST_CHECK_CLOSE(g_dist.template get<0>(key),1,0.0001);
+		//std::cout << "Element: " << g_dist.template get<0>(key) << std::endl;
+
+		auto keyg = g_dist.getGKey(key);
+
+		BOOST_REQUIRE_EQUAL(g_dist.template get<0>(key), keyg.get(0));
 
 		++it;
 		count++;
 	}
 
+	std::cout << "COOOOOOUNT: " << count << std::endl;
+
 	openfpm::vector<size_t> count_total;
 	v_cl.allGather(count,count_total);
 	v_cl.execute();
@@ -130,7 +173,7 @@ BOOST_AUTO_TEST_CASE( grid_gdb_test )
 	// Input data
 	size_t k = 10;
 
-	size_t ghost_part = 0.2;
+	float ghost_part = 0.2;
 
 	// Domain
 	Box<2,float> domain({0.0,0.0},{1.0,1.0});
diff --git a/src/Grid/grid_dist_id_comm.hpp b/src/Grid/grid_dist_id_comm.hpp
index 2a04183a61cd41cf2d2b177aca72ba47f323e8b3..b6b3f6578aa696b486d458c16c2e9bfe4ed3b06b 100644
--- a/src/Grid/grid_dist_id_comm.hpp
+++ b/src/Grid/grid_dist_id_comm.hpp
@@ -9,6 +9,8 @@
 #define SRC_GRID_GRID_DIST_ID_COMM_HPP_
 
 #include "Vector/vector_dist_ofb.hpp"
+#include "data_type/scalar.hpp"
+
 
 /*! \brief This class is a helper for the communication of grid_dist_id
  *
@@ -42,7 +44,7 @@ class grid_dist_id_comm
 	//! \warning m_oGrid is assumed to be an ordered list
 	//! first id is grid
 	//! second id is the processor id
-	openfpm::vector<openfpm::vector<device_grid>> m_oGrid;
+	openfpm::vector<openfpm::vector<aggregate<device_grid,SpaceBox<dim,long int>>>> m_oGrid;
 
 public:
 
@@ -50,28 +52,121 @@ public:
 	 *
 	 * \param m_oGrid_recv Vector of labeled grids to combine into a local grid
 	 */
-	inline void grids_reconstruct(openfpm::vector<openfpm::vector<device_grid>> & m_oGrid_recv, openfpm::vector<device_grid> & loc_grid)
+	inline void grids_reconstruct(openfpm::vector<openfpm::vector<aggregate<device_grid,SpaceBox<dim,long int>>>> & m_oGrid_recv, openfpm::vector<device_grid> & loc_grid, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm)
 	{
+		size_t count2 = 0;
+		for (size_t a = 0; a < m_oGrid_recv.size(); a++)
+		{
+			for (size_t k = 0; k < m_oGrid_recv.get(a).size(); k++)
+			{
+				device_grid & g = m_oGrid_recv.get(a).template get<0>(k);
+
+				size_t count = 0;
+
+
+				auto it = g.getIterator();
+
+				while (it.isNext())
+				{
+					//auto key = it.get();
+
+					//if (g.template get<0>(key) != 1)
+						//std::cout << "WRONG???????" << std::endl;
+
+					++it;
+					count++;
+				}
+
+				SpaceBox<dim,long int> b = m_oGrid_recv.get(a).template get<1>(k);
+
+				//device_grid gr_send(sz);
+				//gr_send.setMemory();
+
+				std::cout << "B: (" << b.getLow(0) << "; " << b.getLow(1) << "); (" << b.getHigh(0) << "; " << b.getHigh(1) << "); " << "G: (" << g.getGrid().getBox().getHigh(0) << "; " << g.getGrid().getBox().getHigh(1) << ")" << std::endl;
+
+				// Set the dimensions of the local grid
+				//g.resize(l_res);
+
+				Point<dim,St> p;
+				for (size_t n = 0; n < dim; n++)
+					p.get(n) = g.getGrid().getBox().getHigh(n);
 
+				//std::cout << "G after resize: (" << g.getGrid().getBox().getLow(0) << "; " << g.getGrid().getBox().getLow(1) << "); (" << g.getGrid().getBox().getHigh(0) << "; " << g.getGrid().getBox().getHigh(1) << ")" << std::endl;
+
+				Point<dim,St> point;
+				for (size_t n = 0; n < dim; n++)
+					point.get(n) = (b.getHigh(n) + b.getLow(n))/2;
+
+				for (size_t j = 0; j < gdb_ext.size(); j++)
+				{
+					// Local sub-domain
+					SpaceBox<dim,long int> sub = gdb_ext.get(j).Dbox;
+					sub += gdb_ext.get(j).origin;
+
+					if (sub.isInside(point) == true)
+					{
+						grid_key_dx<dim> start = b.getKP1() - grid_key_dx<dim>(gdb_ext.get(j).origin.asArray());
+						grid_key_dx<dim> stop = b.getKP2() - grid_key_dx<dim>(gdb_ext.get(j).origin.asArray());
+
+						std::string start2 = start.to_string();
+						std::string stop2 = stop.to_string();
+
+						auto it = loc_grid.get(j).getSubIterator(start,stop);
+
+						// Copy selected elements into a local grid
+						while (it.isNext())
+						{
+							auto key = it.get();
+							std::string str = key.to_string();
+							grid_key_dx<dim> key2 = key - start;
+
+							//std::cout << "Key: " << str << std::endl;
+							loc_grid.get(j).get_o(key) = g.get_o(key2);
+							count2++;
+
+							////////// DEBUG ///////////////
+							if (g.template get<0>(key2) != 1)
+							{
+								//std::cout << "WRONG ZZZZ" << std::endl;
+								//std::cout << "Start: " << start2 << "; Stop: " << stop2 << "; G size: (" << p.get(0) << "; " << p.get(1) << "); Key: " << str << std::endl;
+
+							}
+							++it;
+						}
+					}
+				}
+			}
+		}
+		std::cout << "Count after: " << count2 << std::endl;
 	}
 
 
+	/*! \brief Reconstruct the local grids (placeholder overload, not yet implemented)
+	 *
+	 * \param m_oGrid_recv Vector of labeled grids to combine into a local grid
+	 */
+	inline void grids_reconstruct(openfpm::vector<openfpm::vector<aggregate<device_grid,SpaceBox<dim,long int>>>> & m_oGrid_recv, openfpm::vector<device_grid> & loc_grid, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<size_t> & prc_r)
+	{
 
+	}
 
 	/*! \brief Label intersection grids for mappings
 	 *
 	 * \param prc_sz For each processor the number of grids to send to
 	 */
-	inline void labelIntersectionGridsProcessor(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global, openfpm::vector<openfpm::vector<device_grid>> & lbl_b, openfpm::vector<size_t> & prc_sz)
+	inline void labelIntersectionGridsProcessor(Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global, openfpm::vector<openfpm::vector<aggregate<device_grid,SpaceBox<dim,long int>>>> & lbl_b, openfpm::vector<size_t> & prc_sz)
 	{
 		// resize the label buffer
 		lbl_b.resize(v_cl.getProcessingUnits());
 
+		size_t count = 0;
+		size_t count2 = 0;
+
 		// Label all the intersection grids with the processor id where they should go
 
 		for (size_t i = 0; i < gdb_ext_old.size(); i++)
 		{
-			// Local old sub-domain
+			// Local old sub-domain in global coordinates
 			SpaceBox<dim,long int> sub_dom = gdb_ext_old.get(i).Dbox;
 			sub_dom += gdb_ext_old.get(i).origin;
 
@@ -82,26 +177,22 @@ public:
 				// Intersection box
 				SpaceBox<dim,long int> inte_box;
 
-				// Global new sub-domain
+				// Global new sub-domain in global coordinates
 				SpaceBox<dim,long int> sub_dom_new = gdb_ext_global.get(j).Dbox;
 				sub_dom_new += gdb_ext_global.get(j).origin;
 
-				bool intersect = sub_dom.Intersect(sub_dom_new, inte_box);
+				bool intersect = false;
+
+				if (sub_dom.isValid() == true && sub_dom_new.isValid() == true)
+					intersect = sub_dom.Intersect(sub_dom_new, inte_box);
 
 				if (intersect == true)
 				{
+					//// DEBUG/////
+					count2++;
+					//////////////
 
-					//std::cout << "Inte_box: (" << inte_box.getLow(0) << "; " << inte_box.getLow(1) << "); (" << inte_box.getHigh(0) << "; " << inte_box.getHigh(1) << ")" << std::endl;
-
-/*
-					// Grid to send size
-					size_t sz1[dim];
-					for (size_t l = 0; l < dim; l++)
-					{
-						sz1[l] = inte_box.getHigh(l) - inte_box.getLow(l);
-						std::cout << "Cont. size on " << l << " dimension: " << sz1[l] << std::endl;
-					}
-*/
+					std::cout << "Inte_box: (" << inte_box.getLow(0) << "; " << inte_box.getLow(1) << "); (" << inte_box.getHigh(0) << "; " << inte_box.getHigh(1) << ")" << std::endl;
 
 					auto inte_box_cont = cd_sm.convertCellUnitsIntoDomainSpace(inte_box);
 
@@ -110,38 +201,37 @@ public:
 					for (size_t n = 0; n < dim; n++)
 						p.get(n) = (inte_box_cont.getHigh(n) + inte_box_cont.getLow(n))/2;
 
-					std::cout << "Point: (" << p.get(0) << "; " << p.get(1) << ")" << std::endl;
+					//std::cout << "Point: (" << p.get(0) << "; " << p.get(1) << ")" << std::endl;
 
 					p_id = dec.processorID(p);
 					prc_sz.get(p_id)++;
 
 					std::cout << "P_id: " << p_id << std::endl;
 
-					// Convert intersection box from contiguous to discrete
-					//SpaceBox<dim,long int> inte_box_discr = cd_sm.convertDomainSpaceIntoGridUnits(inte_box,dec.periodicity());
-
-
-					//std::cout << "Beg:" << inte_box_discr.getHigh(0) << "; " << inte_box.getHigh(1) << std::endl;
-					//std::cout << "End:" << inte_box_discr.getLow(0) << "; " << inte_box.getLow(1) << std::endl;
-
 					// Transform coordinates to local
 					auto inte_box_local = inte_box;
 
-					inte_box_local -= gdb_ext_global.get(j).origin;
+					inte_box_local -= gdb_ext_old.get(i).origin;
+
+					std::cout << "gdb_ext_old.get(i): (" << sub_dom.getLow(0) << "; " << sub_dom.getLow(1) << "); (" << sub_dom.getHigh(0) << "; " << sub_dom.getHigh(1) << ")" << std::endl;
 
-					//std::cout << "Inte_box_local: (" << inte_box_local.getLow(0) << "; " << inte_box_local.getLow(1) << "); (" << inte_box_local.getHigh(0) << "; " << inte_box_local.getHigh(1) << ")" << std::endl;
+					std::cout << "gdb_ext_global.get(j): (" << sub_dom_new.getLow(0) << "; " << sub_dom_new.getLow(1) << "); (" << sub_dom_new.getHigh(0) << "; " << sub_dom_new.getHigh(1) << ")" << std::endl;
+
+					std::cout << "Inte_box_local: (" << inte_box_local.getLow(0) << "; " << inte_box_local.getLow(1) << "); (" << inte_box_local.getHigh(0) << "; " << inte_box_local.getHigh(1) << ")" << std::endl;
 
 					// Grid corresponding for gdb_ext_old.get(i) box
-					device_grid gr = loc_grid_old.get(i);
+					device_grid & gr = loc_grid_old.get(i);
 
-					for (size_t l = 0; l < dim; l++)
-						std::cout << "GR Size on " << l << " dimension: " << gr.getGrid().size(l) << std::endl;
+					std::cout << "loc_grid_old.get(i): (" << gr.getGrid().getBox().getLow(0) << "; " << gr.getGrid().getBox().getLow(1) << "); (" << gr.getGrid().getBox().getHigh(0) << "; " << gr.getGrid().getBox().getHigh(1) << ")" << std::endl;
+
+					//for (size_t l = 0; l < dim; l++)
+						//std::cout << "loc_grid_old.get(i).size on " << l << " dimension: " << gr.getGrid().size(l) << std::endl;
 					// Size of the grid to send
 					size_t sz[dim];
 					for (size_t l = 0; l < dim; l++)
 					{
 						sz[l] = inte_box_local.getHigh(l) - inte_box_local.getLow(l) + 1;
-						std::cout << "GR_send size on " << l << " dimension: " << sz[l] << std::endl;
+						//std::cout << "GR_send size on " << l << " dimension: " << sz[l] << std::endl;
 					}
 
 					// Grid to send
@@ -152,8 +242,35 @@ public:
 					grid_key_dx<dim> start = inte_box_local.getKP1();
 					grid_key_dx<dim> stop = inte_box_local.getKP2();
 
-					//std::string start2 = start.to_string();
-					//std::string stop2 = stop.to_string();
+					Point<dim,St> p1;
+					for (size_t n = 0; n < dim; n++)
+						p1.get(n) = gr_send.getGrid().getBox().getLow(n);
+
+					//std::cout << "Grid send P1: (" << p1.get(0) << "; " << p1.get(1) << ")" << std::endl;
+
+					Point<dim,St> p2;
+					for (size_t n = 0; n < dim; n++)
+						p2.get(n) = gr_send.getGrid().getBox().getHigh(n);
+
+					//std::cout << "Grid send P2: (" << p2.get(0) << "; " << p2.get(1) << ")" << std::endl;
+/*
+					Point<dim,St> p3;
+					for (size_t n = 0; n < dim; n++)
+						p3.get(n) = gr.getGrid().getBox().getLow(n);
+
+					std::cout << "Grid local P1: (" << p3.get(0) << "; " << p3.get(1) << ")" << std::endl;
+
+					Point<dim,St> p4;
+					for (size_t n = 0; n < dim; n++)
+						p4.get(n) = gr.getGrid().getBox().getHigh(n);
+
+					std::cout << "Grid local P2: (" << p4.get(0) << "; " << p4.get(1) << ")" << std::endl;
+
+*/
+					std::string start2 = start.to_string();
+					std::string stop2 = stop.to_string();
+
+					std::cout << "Start: " << start2 << "; Stop: " << stop2 << std::endl;
 
 					auto it = gr.getSubIterator(start,stop);
 
@@ -161,25 +278,56 @@ public:
 					while (it.isNext())
 					{
 						auto key = it.get();
+						grid_key_dx<dim> key2 = key - start;
 						std::string str = key.to_string();
 
-						std::cout << "Key: " << str << std::endl;
-						gr_send.get_o(key) = gr.get_o(key);
-
-						//gr_send.template get<0>(key) = gr.template get<0>(key);
-						//gr.template get<0>(key)
+						//std::cout << "Key: " << str << std::endl;
+						gr_send.get_o(key2) = gr.get_o(key);
+/*
+						////////// DEBUG ///////////////
+						if (gr.template get<0>(key) == 1)
+						{
+							count++;
+						}
+						else if (gr_send.template get<0>(key2) != 1)
+						{
+							std::cout << "AHHHHHHHHHH????????" << std::endl;
+						}
+*/
+						////////////////
 
 						//gr_send.set(key,gr,key);
 
 						++it;
 					}
+
+					aggregate<device_grid,SpaceBox<dim,long int>> aggr;
+
+					aggr.template get<0>() = gr_send;
+					aggr.template get<1>() = inte_box;
+
 					// Add to the labeling vector
-					lbl_b.get(p_id).add(gr_send);
-					//std::cout << "9" << std::endl;
+					lbl_b.get(p_id).add(aggr);
 
+					auto it2 = gr_send.getIterator();
+
+					while (it2.isNext())
+					{
+						auto key3 = it2.get();
+
+						if (gr_send.template get<0>(key3) != 1)
+						{
+							//std::string str = key3.to_string();
+							//std::cout << "Wrong!" << std::endl;
+							//std::cout << "Start: " << start2 << "; Stop: " << stop2 << "; Grid send size: (" << p2.get(0) << "; " << p2.get(1) << "); " << "Key: " << str << std::endl;
+						}
+						++it2;
+					}
 				}
 			}
 		}
+		std::cout << "Count for points: " << count << std::endl;
+		std::cout << "Count for inte_boxes: " << count2 << std::endl;
 	}
 
 	/*! \brief Moves all the grids that do not belong to the local processor to the respective processor
@@ -195,13 +343,34 @@ public:
 	 * \param g_m ghost marker
 	 *
 	 */
-	void map_(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global)
+	void map_(Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global)
 	{
 		// Processor communication size
 		openfpm::vector<size_t> prc_sz(v_cl.getProcessingUnits());
 
 		// Contains the processor id of each box (basically where they have to go)
-		labelIntersectionGridsProcessor(domain,dec,cd_sm,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global,m_oGrid,prc_sz);
+		labelIntersectionGridsProcessor(dec,cd_sm,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global,m_oGrid,prc_sz);
+/*
+		for (size_t i = 0; i < m_oGrid.size(); i++)
+		{
+			for (size_t k = 0; k < m_oGrid.get(i).size(); k++)
+			{
+				device_grid g = m_oGrid.get(i).template get<0>(k);
+
+				auto it = g.getIterator();
+
+				while (it.isNext())
+				{
+					auto key = it.get();
+
+					if (g.template get<0>(key) != 1)
+						std::cout << "WROOOOOOONG" << std::endl;
+
+					++it;
+				}
+			}
+		}
+*/
 
 		// Calculate the sending buffer size for each processor, put this information in
 		// a contiguous buffer
@@ -221,25 +390,97 @@ public:
 				prc_sz_r.add(m_oGrid.get(i).size());
 			}
 		}
+/*
+		for (size_t i = 0; i < m_oGrid.size(); i++)
+		{
+			if(m_oGrid.get(i).size() == 0)
+				m_oGrid.remove(i);
+		}
+*/
+
+		decltype(m_oGrid) m_oGrid_new;
+		for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
+		{
+			if (m_oGrid.get(i).size() != 0)
+				m_oGrid_new.add(m_oGrid.get(i));
+		}
 
 		// Vector for receiving of intersection grids
-		openfpm::vector<openfpm::vector<device_grid>> m_oGrid_recv;
+		openfpm::vector<openfpm::vector<aggregate<device_grid,SpaceBox<dim,long int>>>> m_oGrid_recv;
+
+		std::cout << "vcl.getProcessUnitID(): " << v_cl.getProcessUnitID() << "; prc_r.size(): " << prc_r.size() << std::endl;
 
-		m_oGrid_recv.resize(m_oGrid.size());
+		std::cout << "vcl.getProcessUnitID(): " << v_cl.getProcessUnitID() << "; m_oGrid_new.size(): " << m_oGrid_new.size() << std::endl;
+/*
 		for (size_t i = 0; i < m_oGrid.size(); i++)
 		{
-			m_oGrid_recv.get(i).resize(m_oGrid.get(i).size());
+			std::cout << "Processor ID:" << v_cl.getProcessUnitID() << "; I: " << i << ", Size: " << m_oGrid.get(i).size() << std::endl;
+		}
+*/
+/*
+		for (size_t i = 0; i < m_oGrid_new.size(); i++)
+		{
+			for (size_t k = 0; k < m_oGrid_new.get(i).size(); k++)
+			{
+				device_grid g = m_oGrid_new.get(i).template get<0>(k);
+
+				auto it = g.getIterator();
+
+				while (it.isNext())
+				{
+					auto key = it.get();
+
+					if (g.template get<0>(key) != 1)
+						std::cout << "WRONG BEFORE SENDRCV" << std::endl;
+
+					++it;
+				}
+			}
 		}
+*/
 
 		// Send and receive intersection grids
-		v_cl.SSendRecv(m_oGrid,m_oGrid_recv,prc_r,prc_recv_map,recv_sz_map);
+		v_cl.SSendRecv(m_oGrid_new,m_oGrid_recv,prc_r,prc_recv_map,recv_sz_map);
+/*
+		for (size_t i = 0; i < m_oGrid_recv.size(); i++)
+		{
+			for (size_t k = 0; k < m_oGrid_recv.get(i).size(); k++)
+			{
+				device_grid g = m_oGrid_recv.get(i).template get<0>(k);
+
+				auto it = g.getIterator();
 
-		std::cout << "m_oGrid.size(): " << m_oGrid.size() << std::endl;
+				while (it.isNext())
+				{
+					auto key = it.get();
 
-		std::cout << "m_oGrid_recv.size(): " << m_oGrid_recv.size() << std::endl;
+					if (g.template get<0>(key) != 1)
+						std::cout << "WRONG AFTER SENDRCV" << std::endl;
 
+					++it;
+				}
+			}
+		}
+*/
+/*
+		std::cout << "vcl.getProcessUnitID(): " << v_cl.getProcessUnitID() << "; m_oGrid_recv.size(): " << m_oGrid_recv.size() << std::endl;
+
+		for (size_t i = 0; i < m_oGrid_recv.size(); i++)
+		{
+			std::cout << "Processor ID:" << v_cl.getProcessUnitID() << "; I_recv: " << i << ", Size: " << m_oGrid_recv.get(i).size() << std::endl;
+		}
+
+		for (size_t i = 0; i < prc_r.size(); i++)
+			std::cout << "vcl.getProcessUnitID(): " << v_cl.getProcessUnitID() << "; prc_r: " << prc_r.get(i) << std::endl;
+
+		for (size_t i = 0; i < prc_recv_map.size(); i++)
+			std::cout << "vcl.getProcessUnitID(): " << v_cl.getProcessUnitID() << "; prc_recv_map: " << prc_recv_map.get(i) << std::endl;
+
+		for (size_t i = 0; i < recv_sz_map.size(); i++)
+			std::cout << "vcl.getProcessUnitID(): " << v_cl.getProcessUnitID() << "; recv_sz_map: " << recv_sz_map.get(i) << std::endl;
+*/
 		// Reconstruct the new local grids
-		//grids_reconstruct(m_oGrid_recv,loc_grid);
+		grids_reconstruct(m_oGrid_recv,loc_grid,gdb_ext,cd_sm);
 	}
 
 	/*! \brief Constructor
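
The core correction in this mapping code is the origin used to localize the intersection box: labelIntersectionGridsProcessor() now subtracts gdb_ext_old.get(i).origin on the packing side (previously the origin of the new global sub-domain was used), and grids_reconstruct() subtracts the destination sub-domain's origin before copying, so a packed key key2 = key - start addresses the same global cell on both ends. A small self-contained illustration of the two shifts, with made-up coordinates rather than OpenFPM types:

#include <array>
#include <iostream>

constexpr int dim = 2;
using Key = std::array<long, dim>;

// shift a global grid key into the local frame of a sub-domain
Key to_local(const Key & global, const Key & origin)
{
	Key r;
	for (int n = 0; n < dim; n++)
		r[n] = global[n] - origin[n];
	return r;
}

int main()
{
	Key global_cell = {7, 3}; // a cell inside the intersection box
	Key origin_old  = {5, 0}; // origin of the old (sending) sub-domain
	Key origin_new  = {6, 2}; // origin of the new (receiving) sub-domain

	// the sender packs with the old origin, the receiver copies with the
	// new one; using the wrong origin on either side reads the wrong cells
	Key sender_key   = to_local(global_cell, origin_old);
	Key receiver_key = to_local(global_cell, origin_new);

	std::cout << "sender-local (" << sender_key[0] << "," << sender_key[1]
	          << ") receiver-local (" << receiver_key[0] << "," << receiver_key[1]
	          << ")\n";
}
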
diff --git a/src/Vector/vector_dist.hpp b/src/Vector/vector_dist.hpp
index 2d94c32ff7d656dd630d933d68e26ec156e20dc6..922becc6fa410202a02ba41282b1fc43233e6477 100644
--- a/src/Vector/vector_dist.hpp
+++ b/src/Vector/vector_dist.hpp
@@ -983,7 +983,7 @@ public:
 		Packer<decltype(v_pos),HeapMemory>::packRequest(v_pos,req);
 		Packer<decltype(v_prp),HeapMemory>::packRequest(v_prp,req);
 
-		//std::cout << "Req: " << req << std::endl;
+		std::cout << "Req: " << req << std::endl;
 
 		// allocate the memory
 		HeapMemory pmem;
@@ -1081,7 +1081,7 @@ public:
 	    		offset[0] += sz_others.get(i);
 	    }
 
-	    //std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
+	    std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
 
 	    int metadata[mpi_size];
 
@@ -1116,95 +1116,9 @@ public:
 	    H5Fclose(file);
 	}
 
-	inline void load(const std::string & filename)
+	void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> & metadata_accum, hid_t plist_id, hid_t dataset_2)
 	{
-		MPI_Comm comm = v_cl.getMPIComm();
-		MPI_Info info  = MPI_INFO_NULL;
-
-		int mpi_rank = v_cl.getProcessUnitID();
-		int mpi_size = v_cl.getProcessingUnits();
-
-		// Set up file access property list with parallel I/O access
-		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
-		H5Pset_fapl_mpio(plist_id, comm, info);
-
-		//Open a file
-	    hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
-	    H5Pclose(plist_id);
-
-	    //Open dataset
-	    hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
-
-	    //Create property list for collective dataset read
-	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
-	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-		//Select file dataspace
-		hid_t file_dataspace_id = H5Dget_space(dataset);
-
-		hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
-/*
-		if (mpi_rank == 0)
-			printf ("\nOld MPI size: %llu\n", mpi_size_old);
-*/
-	  	//Where to read metadata
-	  	int metadata_out[mpi_size_old];
-
-	  	for (int i = 0; i < mpi_size_old; i++)
-	  	{
-	  		metadata_out[i] = 0;
-	  	}
-
-		//Size for data space in memory
-		hsize_t mdim[1] = {(size_t)mpi_size_old};
-
-		//Create data space in memory
-		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
-/*
-		if (mpi_rank == 0)
-		{
-			hssize_t size;
-
-			size = H5Sget_select_npoints (mem_dataspace_id);
-			printf ("LOAD: memspace_id size: %llu\n", size);
-			size = H5Sget_select_npoints (file_dataspace_id);
-			printf ("LOAD: dataspace_id size: %llu\n", size);
-		}
-*/
-	  	// Read the dataset.
-	    H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
-/*
-		if (mpi_rank == 0)
-		{
-			std::cout << "Metadata_out[]: ";
-			for (int i = 0; i < mpi_size_old; i++)
-			{
-				std::cout << metadata_out[i] << " ";
-			}
-			std::cout << " " << std::endl;
-		}
-*/
-	    //Open dataset
-	    hid_t dataset_2 = H5Dopen (file, "vector_dist", H5P_DEFAULT);
-
-	    //Create property list for collective dataset read
-	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
-	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-	  	hsize_t block[1] = {0};
-	  	hsize_t block_add[1] = {0};
-/*
-	  	openfpm::vector<openfpm::vector<hsize_t>> block0;
-	  	openfpm::vector<openfpm::vector<hsize_t>> block_add0;
-	  	openfpm::vector<openfpm::vector<hsize_t>> offset0;
-	  	openfpm::vector<openfpm::vector<hsize_t>> offset_add0;
-
-	  	block0.resize(mpi_size);
-	  	offset0.resize(mpi_size);
-	  	block_add0.resize(mpi_size);
-		offset_add0.resize(mpi_size);
-*/
-	  	if (mpi_size >= mpi_size_old)
+/*	  	if (mpi_size >= mpi_size_old)
 	  	{
 			if (mpi_rank >= mpi_size_old)
 				block[0] = 0;
@@ -1227,12 +1141,28 @@ public:
 				block_add[0] += metadata_out[mpi_size*x+mpi_rank];
 				//block_add0.get(mpi_rank).add(metadata_out[mpi_size*x+mpi_rank]);
 	  		}
-	  	}
+	  	}*/
 
-	  	hsize_t offset[1] = {0};
-	    hsize_t offset_add[1] = {0};
+		std::cout << "BID: " << bid << std::endl;
 
-	    if (mpi_size >= mpi_size_old)
+		hsize_t offset[1];
+		hsize_t block[1];
+
+		if (bid < mpi_size_old && bid != -1)
+		{
+			offset[0] = metadata_accum.get(bid);
+			block[0] = metadata_out[bid];
+		}
+		else
+		{
+			offset[0] = 0;
+			block[0] = 0;
+		}
+
+		std::cout << "Offset: " << offset[0] << "; Block: " << block[0]<<  std::endl;
+//	    hsize_t offset_add[1] = {0};
+
+/*	    if (mpi_size >= mpi_size_old)
 		{
 			if (mpi_rank >= mpi_size_old)
 				offset[0] = 0;
@@ -1262,7 +1192,7 @@ public:
 	  				//offset_add0.get(mpi_rank).add(metadata_out[i]);
 	  			}
 	  		}
-	    }
+	    }*/
 
 	    //hsize_t stride[1] = {1};
 	    hsize_t count[1] = {1};
@@ -1286,12 +1216,12 @@ public:
         H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);
 
 		//Select file dataspace
-		hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
+/*		hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
 
-        H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);
+        H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);*/
 
         hsize_t mdim_2[1] = {block[0]};
-        hsize_t mdim_3[1] = {block_add[0]};
+//        hsize_t mdim_3[1] = {block_add[0]};
 
 
 		//Size for data space in memory
@@ -1303,9 +1233,9 @@ public:
 
 		//Create data space in memory
 		hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
-		hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
-/*
-		if (mpi_rank == 0)
+//		hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
+
+		//if (mpi_rank == 0)
 		{
 			hssize_t size2;
 
@@ -1314,7 +1244,7 @@ public:
 			size2 = H5Sget_select_npoints (file_dataspace_id_2);
 			printf ("LOAD: dataspace_id_2 size: %llu\n", size2);
 		}
-
+/*
 		if (mpi_rank == 0)
 		{
 			hssize_t size2;
@@ -1325,58 +1255,51 @@ public:
 			printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
 		}
 */
-		size_t sum = 0;
+		size_t sum = 0;
 
 		for (int i = 0; i < mpi_size_old; i++)
 		{
 			sum += metadata_out[i];
 		}
 
-
-		//std::cout << "LOAD: sum: " << sum << std::endl;
+		std::cout << "LOAD: sum: " << sum << std::endl;
 
 		// allocate the memory
 		HeapMemory pmem;
-		HeapMemory pmem2;
+//		HeapMemory pmem2;
 		//pmem.allocate(req);
 		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
 		mem.incRef();
-		ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
-		mem2.incRef();
+//		ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
+//		mem2.incRef();
 
 	  	// Read the dataset.
 	    H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
 
 	    // Read the dataset.
-		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
+//		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
 
 		mem.allocate(pmem.size());
-		mem2.allocate(pmem2.size());
-		//std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl;
+//		mem2.allocate(pmem2.size());
+		std::cout << "Mem.size(): " << mem.size() << " = " << block[0] << std::endl;
 
 		Unpack_stat ps;
 
-		Unpacker<decltype(v_pos),HeapMemory>::unpack(mem,v_pos,ps);
-		Unpacker<decltype(v_prp),HeapMemory>::unpack(mem,v_prp,ps);
-
-		Unpack_stat ps2;
-
 		openfpm::vector<Point<dim, St>> v_pos_unp;
 
 		openfpm::vector<prop> v_prp_unp;
 
-		Unpacker<decltype(v_pos),HeapMemory>::unpack(mem2,v_pos_unp,ps2);
-		Unpacker<decltype(v_prp),HeapMemory>::unpack(mem2,v_prp_unp,ps2);
+		Unpacker<decltype(v_pos_unp),HeapMemory>::unpack(mem,v_pos_unp,ps,1);
+		Unpacker<decltype(v_prp_unp),HeapMemory>::unpack(mem,v_prp_unp,ps,1);
+
+//		Unpack_stat ps2;
 
-	    // Close the dataset.
-	    H5Dclose(dataset);
-	    H5Dclose(dataset_2);
-	    // Close the file.
-	    H5Fclose(file);
-	    H5Pclose(plist_id);
 
-		//std::cout << "V_pos.size(): " << v_pos.size() << std::endl;
-		//std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl;
+//		Unpacker<decltype(v_pos),HeapMemory>::unpack(mem2,v_pos_unp,ps2,1);
+//		Unpacker<decltype(v_prp),HeapMemory>::unpack(mem2,v_prp_unp,ps2,1);
+
+//		std::cout << "V_pos.size(): " << v_pos.size() << std::endl;
+//		std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl;
 
 		mem.decRef();
 		delete &mem;
@@ -1384,7 +1307,173 @@ public:
 		for (size_t i = 0; i < v_pos_unp.size(); i++)
 			v_pos.add(v_pos_unp.get(i));
 
+		for (size_t i = 0; i < v_prp_unp.size(); i++)
+			v_prp.add(v_prp_unp.get(i));
+
 		g_m = v_pos.size();
+	}
+
+	inline void load(const std::string & filename)
+	{
+		v_pos.clear();
+		v_prp.clear();
+
+		g_m = 0;
+
+		MPI_Comm comm = v_cl.getMPIComm();
+		MPI_Info info  = MPI_INFO_NULL;
+
+		int mpi_rank = v_cl.getProcessUnitID();
+		//int mpi_size = v_cl.getProcessingUnits();
+
+		// Set up file access property list with parallel I/O access
+		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+		H5Pset_fapl_mpio(plist_id, comm, info);
+
+		//Open a file
+	    hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
+	    H5Pclose(plist_id);
+
+	    //Open dataset
+	    hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
+
+	    //Create property list for collective dataset read
+	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
+	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+		//Select file dataspace
+		hid_t file_dataspace_id = H5Dget_space(dataset);
+
+		hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
+/*
+		if (mpi_rank == 0)
+			printf ("\nOld MPI size: %llu\n", mpi_size_old);
+*/
+	  	//Where to read metadata
+	  	int metadata_out[mpi_size_old];
+
+	  	for (int i = 0; i < mpi_size_old; i++)
+	  	{
+	  		metadata_out[i] = 0;
+	  	}
+
+		//Size for data space in memory
+		hsize_t mdim[1] = {(size_t)mpi_size_old};
+
+		//Create data space in memory
+		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
+/*
+		if (mpi_rank == 0)
+		{
+			hssize_t size;
+
+			size = H5Sget_select_npoints (mem_dataspace_id);
+			printf ("LOAD: memspace_id size: %llu\n", size);
+			size = H5Sget_select_npoints (file_dataspace_id);
+			printf ("LOAD: dataspace_id size: %llu\n", size);
+		}
+*/
+	  	// Read the dataset.
+	    H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
+/*
+		if (mpi_rank == 0)
+		{
+			std::cout << "Metadata_out[]: ";
+			for (int i = 0; i < mpi_size_old; i++)
+			{
+				std::cout << metadata_out[i] << " ";
+			}
+			std::cout << " " << std::endl;
+		}
+*/
+	    openfpm::vector<size_t> metadata_accum;
+	    metadata_accum.resize(mpi_size_old);
+
+	    metadata_accum.get(0) = 0;
+	    for (int i = 1 ; i < mpi_size_old ; i++)
+	    	metadata_accum.get(i) = metadata_accum.get(i-1) + metadata_out[i-1];
+
+	    //Open dataset
+	    hid_t dataset_2 = H5Dopen (file, "vector_dist", H5P_DEFAULT);
+
+	    //Create property list for collective dataset read
+	  	plist_id = H5Pcreate(H5P_DATASET_XFER);
+	  	H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+/*
+	  	openfpm::vector<openfpm::vector<hsize_t>> block0;
+	  	openfpm::vector<openfpm::vector<hsize_t>> block_add0;
+	  	openfpm::vector<openfpm::vector<hsize_t>> offset0;
+	  	openfpm::vector<openfpm::vector<hsize_t>> offset_add0;
+
+	  	block0.resize(mpi_size);
+	  	offset0.resize(mpi_size);
+	  	block_add0.resize(mpi_size);
+		offset_add0.resize(mpi_size);
+*/
+	  	openfpm::vector<size_t> n_block;
+	  	n_block.resize(v_cl.getProcessingUnits());
+
+
+	  	for(size_t i = 0 ; i < n_block.size() ; i++)
+	  		n_block.get(i) = mpi_size_old / v_cl.getProcessingUnits();
+
+	  	size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();
+
+	  	std::cout << "MPI size old: " << mpi_size_old << std::endl;
+	  	std::cout << "MPI size: " << v_cl.getProcessingUnits() << std::endl;
+
+
+	  	std::cout << "Rest block: " << rest_block << std::endl;
+
+	  	size_t max_block;
+
+	  	if (rest_block != 0)
+	  		max_block = n_block.get(0) + 1;
+	  	else
+	  		max_block = n_block.get(0);
+
+	  	//for(size_t i = 0 ; i < n_block.size() ; i++)
+	  	for(size_t i = 0 ; i < rest_block ; i++)
+	  		n_block.get(i) += 1;
+
+
+	  	for(size_t i = 0 ; i < n_block.size() ; i++)
+	  		std::cout << "n_block.get(i): " << n_block.get(i) << std::endl;
+
+	  	size_t start_block = 0;
+	  	size_t stop_block = 0;
+
+
+	  	if (v_cl.getProcessUnitID() != 0)
+	  	{
+			for(size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
+				start_block += n_block.get(i);
+	  	}
+
+	  	stop_block = start_block + n_block.get(v_cl.getProcessUnitID());
+
+	  	std::cout << "ID: " << v_cl.getProcessUnitID() << "; Start block: " << start_block << "; " << "Stop block: " << stop_block << std::endl;
+
+	  	if (mpi_rank >= mpi_size_old)
+	  		load_block(start_block,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
+	  	else
+	  	{
+	  		size_t n_bl = 0;
+	  		size_t lb = start_block;
+			for ( ; lb < stop_block ; lb++, n_bl++)
+				load_block(lb,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
+
+			if (n_bl < max_block)
+				load_block(-1,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
+	  	}
+
+	    // Close the dataset.
+	    H5Dclose(dataset);
+	    H5Dclose(dataset_2);
+	    // Close the file.
+	    H5Fclose(file);
+	    H5Pclose(plist_id);
 
 		//std::cout << "V_pos.size() after merge: " << v_pos.size() << std::endl;
 
diff --git a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
index c30686fbfbb7d7b313d08307cce95ef6b931f259..b166fb2b892fc499374a4750ecfc03d6e3a6b41c 100644
--- a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
+++ b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
@@ -48,7 +48,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 	const size_t Ng = cbrt(k);
 
 	// we create a Grid iterator
-	size_t sz[3] = {Ng,Ng,Ng};
+	size_t sz[dim] = {Ng,Ng,Ng};
 
 	for (size_t i = 0; i < dim; i++)
 		bc[i] = NON_PERIODIC;
@@ -75,16 +75,12 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 		++it;
 	}
 
-	BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1));
-	BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
-	BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
-
-	//std::cout << "Size_local: " << vd.size_local_with_ghost() << std::endl;
+	//BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1));
+	//BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
+	//BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
 
 	vd.map();
 
-	//std::cout << "Size_local after map: " << vd.size_local_with_ghost() << std::endl;
-
 	// Put forces
 
 	auto it2 = vd.getDomainIterator();
@@ -95,7 +91,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 
 		//Put the forces
 		for (size_t i = 0; i < dim; i++)
-			vd.template getProp<0>(key)[i] = 0.51234;
+			vd.template getProp<0>(key)[i] = 0.51234 + vd.getPos(key)[0] + vd.getPos(key)[1] + vd.getPos(key)[2];
 
 		++it2;
 	}
@@ -109,6 +105,8 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 	std::cout << "Saving time: " << t.getwct() << std::endl;
 }
 
+
+
 BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 {
 	Vcluster & v_cl = create_vcluster();
@@ -141,6 +139,8 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 
 	vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost);
 
+	vd.load("vector_dist.h5");
+
 	timer t;
 	t.start();
 	// Load the vector
@@ -164,13 +164,15 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 
 	BOOST_REQUIRE_EQUAL(sum,k);
 
+	//std::cout << "Sum: " << sum << std::endl;
+
     // Check spacing (positions)
 
 	auto it = vd.getGridIterator(sz);
 
 	while (it.isNext())
 	{
-		auto key = it.get();
+		//auto key = it.get();
 
 		++it;
 	}
@@ -179,7 +181,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 	BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
 	BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
 
-/*
+
 	// Check properties
 
 	auto it2 = vd.getDomainIterator();
@@ -190,11 +192,10 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 
 		//Put the forces
 		for (size_t i = 0; i < dim; i++)
-			BOOST_CHECK_CLOSE(vd.template getProp<0>(key)[i],0.51234,0.0001);
+			BOOST_CHECK_CLOSE(vd.template getProp<0>(key)[i],0.51234 + vd.getPos(key)[0] + vd.getPos(key)[1] + vd.getPos(key)[2],0.0001);
 
 		++it2;
 	}
-*/
 }
 
 BOOST_AUTO_TEST_SUITE_END()
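
The reworked tests follow the usual checkpoint/restart pattern: seed every element's property from its own position (or global key) before save(), then after load() recompute the expected value from the restored coordinates, so correctness is verified per element even when the data lands on a different number of ranks. A minimal sketch of the check, assuming the property was written as 0.51234 + x + y + z as in the save test above:

#include <cassert>
#include <cmath>

// expected property value for a particle at (x, y, z); must match the
// expression used on the save side
double expected(double x, double y, double z)
{
	return 0.51234 + x + y + z;
}

int main()
{
	// one restored particle: its position and the property read back
	double pos[3] = {0.1, 0.2, 0.3};
	double prop   = expected(pos[0], pos[1], pos[2]); // stands in for file data

	// BOOST_CHECK_CLOSE(a, b, 0.0001) is a relative test at 0.0001 percent
	assert(std::fabs(prop - expected(pos[0], pos[1], pos[2]))
	       <= 1e-6 * std::fabs(expected(pos[0], pos[1], pos[2])));
	return 0;
}
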
diff --git a/src/Vector/vector_dist_NN_tests.hpp b/src/Vector/vector_dist_NN_tests.hpp
index a750bc95b6bfc5e915ef7e9f46268a9a8b96b5d0..931967eadf7b919e9f7c9454a929c8274edc51d3 100644
--- a/src/Vector/vector_dist_NN_tests.hpp
+++ b/src/Vector/vector_dist_NN_tests.hpp
@@ -217,5 +217,4 @@ BOOST_AUTO_TEST_CASE( vector_dist_full_NN )
 	}
 }
 
-
 #endif /* SRC_VECTOR_VECTOR_DIST_NN_TESTS_HPP_ */