diff --git a/src/Grid/grid_dist_id.hpp b/src/Grid/grid_dist_id.hpp
index 208b654f9eb2edca1d9607b96ff49db1be20770d..2ecf2f5895bd93032d020490d75ac34f4ec5ce9f 100644
--- a/src/Grid/grid_dist_id.hpp
+++ b/src/Grid/grid_dist_id.hpp
@@ -1556,6 +1556,161 @@ public:
 		}
 	}
 
+	/*! \brief Move all the grid parts that do not belong to the local processor to their respective processors
+	 *
+	 * \note Not implemented yet, the body is an empty stub
+	 *
+	 */
+	void map()
+	{
+
+	}
+
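+	/*! \brief Save the distributed-grid information (gdb_ext) into an HDF5 file using collective MPI I/O
+	 *
+	 * Each processor packs its gdb_ext (the local grid boxes) into a memory
+	 * buffer, the buffer sizes are gathered across all processors, and every
+	 * rank writes its chunk into a single one-dimensional "grid_dist" data set
+	 * at an offset equal to the sum of the sizes of all lower ranks. A second
+	 * "metadata" data set records the chunk size of every processor, so that
+	 * a reader can reconstruct the offsets.
+	 *
+	 * \param filename file to save into
+	 *
+	 */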
+	inline void save(const std::string & filename) const
+	{
+		// Compute the number of bytes needed to serialize gdb_ext
+		size_t req = 0;
+
+		// Packing of the decomposition is left out for now
+		//Packer<decltype(dec),HeapMemory>::packRequest(dec,req);
+		Packer<decltype(gdb_ext),HeapMemory>::packRequest(gdb_ext,req);
+
+		std::cout << "Req: " << req << std::endl;
+
+		// Allocate the memory: the ExtPreAlloc wrapper allocates req bytes from pmem
+		HeapMemory pmem;
+		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(req,pmem));
+		mem.incRef();
+
+		// Serialize gdb_ext into the pre-allocated buffer
+		Pack_stat sts;
+
+		//Packer<decltype(dec),HeapMemory>::pack(mem,dec,sts);
+		Packer<decltype(gdb_ext),HeapMemory>::pack(mem,gdb_ext,sts);
+
+	    /*****************************************************************
+	     * Create a new file with default creation and access properties.*
+	     * Then create a dataset and write data to it and close the file *
+	     * and dataset.                                                  *
+	     *****************************************************************/
+
+		int mpi_rank = v_cl.getProcessUnitID();
+		int mpi_size = v_cl.getProcessingUnits();
+
+		if (mpi_rank == 0)
+			std::cout << "Saving grid" << std::endl;
+
+		MPI_Comm comm = v_cl.getMPIComm();
+		MPI_Info info  = MPI_INFO_NULL;
+
+		// Set up the file access property list with parallel I/O access
+		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+		H5Pset_fapl_mpio(plist_id, comm, info);
+
+		// Create a new file collectively and release property list identifier.
+		hid_t file = H5Fcreate (filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+		H5Pclose(plist_id);
+
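+		// Gather the packed-buffer size of every processor: each rank needs the
+		// sizes of all the others to compute the total file size and its offset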
+		size_t sz = pmem.size();
+		std::cout << "Pmem.size: " << pmem.size() << std::endl;
+		openfpm::vector<size_t> sz_others;
+		v_cl.allGather(sz,sz_others);
+		v_cl.execute();
+
+		size_t sum = 0;
+
+		for (size_t i = 0; i < sz_others.size(); i++)
+			sum += sz_others.get(i);
+
+		// Size of the data space in the file: total packed bytes across all processors
+		hsize_t fdim[1] = {sum};
+
+		// Size of the metadata data space: one entry per processor
+		hsize_t fdim2[1] = {(hsize_t)mpi_size};
+
+		// Create the data space in the file
+		hid_t file_dataspace_id = H5Screate_simple(1, fdim, NULL);
+
+		// Create the metadata data space in the file
+		hid_t file_dataspace_id_2 = H5Screate_simple(1, fdim2, NULL);
+
+		//Size for data space in memory
+		hsize_t mdim[1] = {pmem.size()};
+
+		//Create data space in memory
+		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
+
+		std::cout << "Sum: " << sum << std::endl;
+
+		// Create the data set that stores the packed chunk of every processor
+		hid_t file_dataset = H5Dcreate (file, "grid_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+		// Create the metadata data set: one chunk size per processor, so that a
+		// reader can recompute each rank's offset inside "grid_dist"
+		hid_t file_dataset_2 = H5Dcreate (file, "metadata", H5T_NATIVE_INT, file_dataspace_id_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+	    H5Sclose(file_dataspace_id);
+	    H5Sclose(file_dataspace_id_2);
+
+	    hsize_t block[1] = {pmem.size()};
+	    hsize_t count[1] = {1};
+	    hsize_t offset[1] = {0};
+
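+	    // Each rank writes a single contiguous block of pmem.size() bytes; its
+	    // offset in the file is the sum of the chunk sizes of all lower ranks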
+	    for (int i = 0; i < mpi_rank; i++)
+	    	offset[0] += sz_others.get(i);
+
+	    std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
+
+	    // Per-processor chunk sizes, the content of the metadata data set
+	    openfpm::vector<int> metadata;
+
+	    for (int i = 0; i < mpi_size; i++)
+	    	metadata.add(sz_others.get(i));
+
+	    // Select this rank's hyperslab (one contiguous block) in the file data space
+	    file_dataspace_id = H5Dget_space(file_dataset);
+	    H5Sselect_hyperslab(file_dataspace_id, H5S_SELECT_SET, offset, NULL, count, block);
+
+	    // The metadata data set is written in full, no hyperslab selection is needed
+	    file_dataspace_id_2 = H5Dget_space(file_dataset_2);
+
+	    //Create property list for collective dataset write.
+	    plist_id = H5Pcreate(H5P_DATASET_XFER);
+	    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+		// Write the packed chunk of this processor into its hyperslab
+		herr_t status = H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
+
+		// Write the metadata data set (collective, every rank writes the same values)
+		herr_t status_2 = H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, &metadata.get(0));
+
+
+	    //Close/release resources.
+	    H5Dclose(file_dataset);
+	    H5Sclose(file_dataspace_id);
+	    H5Dclose(file_dataset_2);
+	    H5Sclose(file_dataspace_id_2);
+	    H5Sclose(mem_dataspace_id);
+	    H5Pclose(plist_id);
+	    H5Fclose(file);
+
+	    // Release the packing buffer, it was incRef-ed after construction
+	    mem.decRef();
+	    delete &mem;
+	}
+
 	//! Define friend classes
 	//\cond
 	friend grid_dist_id<dim,St,T,typename Decomposition::extended_type,Memory,device_grid>;
diff --git a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3c35c9b4088d6ed18b6da64d214f2fa00c6aa45a
--- /dev/null
+++ b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
@@ -0,0 +1,87 @@
+/*
+ * grid_dist_id_HDF5_chckpnt_restart_test.hpp
+ *
+ *  Created on: Nov 9, 2016
+ *      Author: Yaroslav Zaluzhnyi
+ */
+
+#ifndef SRC_GRID_GRID_DIST_ID_HDF5_CHCKPNT_RESTART_TEST_HPP_
+#define SRC_GRID_GRID_DIST_ID_HDF5_CHCKPNT_RESTART_TEST_HPP_
+
+#include "Grid/grid_dist_id.hpp"
+
+BOOST_AUTO_TEST_SUITE( gd_hdf5_chckpnt_rstrt_test )
+
+BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
+{
+
+	// Input data
+	size_t k = 100;
+
+	float ghost_part = 0.01;
+
+	/////////////////
+	size_t bc[3] = {NON_PERIODIC, NON_PERIODIC, NON_PERIODIC};
+
+	// Domain
+	Box<3,float> domain({-0.3,-0.3,-0.3},{1.0,1.0,1.0});
+
+	Vcluster & v_cl = create_vcluster();
+
+	// Skip this test on big scale
+	if (v_cl.getProcessingUnits() >= 32)
+		return;
+
+	if (v_cl.getProcessUnitID() == 0)
+		std::cout << "Testing 3D grid HDF5 save/load" << std::endl;
+
+	// grid size
+	size_t sz[3];
+	sz[0] = k;
+	sz[1] = k;
+	sz[2] = k;
+
+	// Ghost
+	Ghost<3,float> g(ghost_part);
+
+	// Distributed grid with id decomposition
+	grid_dist_id<3, float, scalar<float>, CartDecomposition<3,float>> g_dist(sz,domain,g);
+
+	// get the decomposition
+	auto & dec = g_dist.getDecomposition();
+
+	// check the consistency of the decomposition
+	bool val = dec.check_consistency();
+	BOOST_REQUIRE_EQUAL(val,true);
+
+	// Get the number of local sub-domains (one local grid each)
+	size_t n_grid = dec.getNSubDomain();
+
+	size_t vol = 0;
+
+	// vector of boxes
+	openfpm::vector<Box<3,size_t>> vb;
+
+	// Collect the grid box of every sub-domain and accumulate the total volume
+	for (size_t i = 0 ; i < n_grid ; i++)
+	{
+		// Get the local hyper-cube
+		SpaceBox<3,float> sub = dec.getSubDomain(i);
+		sub -= domain.getP1();
+
+		Box<3,size_t> g_box = g_dist.getCellDecomposer().convertDomainSpaceIntoGridUnits(sub,bc);
+
+		vb.add(g_box);
+
+		vol += g_box.getVolumeKey();
+	}
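+
+	// vb and vol now hold the local grid boxes and the total local volume;
+	// nothing is asserted on them yet, a future load test can compare them
+	// against a restored grid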
+
+
+	// Save the grid
+	g_dist.save("grid_dist_id.h5");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+#endif /* SRC_GRID_GRID_DIST_ID_HDF5_CHCKPNT_RESTART_TEST_HPP_ */
diff --git a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..32085c6341cc11f2d1054d5afbbf373c5971a49c
--- /dev/null
+++ b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
@@ -0,0 +1,186 @@
+/*
+ * vector_dist_HDF5_chckpnt_restart_test.hpp
+ *
+ *  Created on: Jun 12, 2016
+ *      Author: Yaroslav Zaluzhnyi
+ */
+
+#ifndef SRC_VECTOR_VECTOR_DIST_HDF5_CHCKPNT_RESTART_TEST_HPP_
+#define SRC_VECTOR_VECTOR_DIST_HDF5_CHCKPNT_RESTART_TEST_HPP_
+
+#include "vector_dist.hpp"
+#include "Packer_Unpacker/Pack_selector.hpp"
+#include "Packer_Unpacker/Packer.hpp"
+#include "Packer_Unpacker/Unpacker.hpp"
+#include "vector_dist_performance_util.hpp"
+
+
+#include "hdf5.h"
+
+BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test )
+
+BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
+{
+	// Input data
+
+	const size_t dim = 3;
+
+	float ghost_part = 0.1;
+
+	/////////////////
+
+	Vcluster & v_cl = create_vcluster();
+
+	if (v_cl.getProcessUnitID() == 0)
+		std::cout << "Testing " << dim << "D vector HDF5 save/load" << std::endl;
+
+	Box<dim,float> box;
+
+	for (size_t i = 0; i < dim; i++)
+	{
+		box.setLow(i,0.0);
+		box.setHigh(i,1.0);
+	}
+
+	// Boundary conditions
+	size_t bc[dim];
+
+	for (size_t i = 0; i < dim; i++)
+		bc[i] = PERIODIC;
+
+	vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,Ghost<dim,float>(ghost_part));
+
+	// Initialize a dist vector
+	//vd_initialize<dim>(vd, v_cl, k);
+
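+	// Place one particle on every node of a 10x10x10 lattice spanning the domain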
+	size_t sz[3] = {10,10,10};
+
+	auto it = vd.getGridIterator(sz);
+
+	while (it.isNext())
+	{
+		vd.add();
+
+		auto key = it.get();
+
+		vd.getLastPos()[0] = key.get(0) * it.getSpacing(0);
+		vd.getLastPos()[1] = key.get(1) * it.getSpacing(1);
+		vd.getLastPos()[2] = key.get(2) * it.getSpacing(2);
+
+		++it;
+	}
+
+	vd.map();
+
+	vd.template ghost_get<0>();
+
+	// The random generator engine
+	std::default_random_engine eg(v_cl.getProcessUnitID()*4313);
+	std::uniform_real_distribution<float> ud(0.0f, 1.0f);
+
+	// Assign a random force to every particle
+
+	auto it_2 = vd.getIterator();
+
+	while (it_2.isNext())
+	{
+		auto key = it_2.get();
+
+		//Put the forces
+		for (size_t i = 0; i < dim; i++)
+			vd.template getProp<0>(key)[i] = ud(eg);
+
+		++it_2;
+	}
+
+	// Save the vector
+	vd.save("vector_dist.h5");
+
+    // Load the vector
+/*
+    vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd2(0,box,bc,Ghost<dim,float>(ghost_part));
+
+    vd2.load("vector_dist.h5");
+
+    auto it_d = vd.getDomainIterator();
+
+    while (it_d.isNext())
+    {
+		auto key = it_d.get();
+
+		// Get the position of the particles
+		Point<dim,float> p1 = vd.getPos(key);
+		Point<dim,float> p2 = vd2.getPos(key);
+
+		BOOST_REQUIRE(p1 == p2);
+
+		++it_d;
+    }*/
+}
+
+BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
+{
+	if (create_vcluster().getProcessUnitID() == 0)
+		std::cout << "Loading distributed vector" << std::endl;
+
+	const size_t dim = 3;
+
+	float ghost_part = 0.1;
+
+	Box<dim,float> box;
+
+	for (size_t i = 0; i < dim; i++)
+	{
+		box.setLow(i,0.0);
+		box.setHigh(i,1.0);
+	}
+
+	// Boundary conditions
+	size_t bc[dim];
+
+	for (size_t i = 0; i < dim; i++)
+		bc[i] = PERIODIC;
+
+	vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,Ghost<dim,float>(ghost_part));
+
+	vd.load("vector_dist.h5");
+
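+	// Smoke check: build a cell list from the restored positions and count the
+	// neighborhood candidates of every particle; this only verifies that the
+	// loaded positions are usable, no values are asserted yet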
+	auto NN = vd.getCellList(0.5);
+
+	auto it_v = vd.getDomainIterator();
+
+	while (it_v.isNext())
+	{
+		//key
+		vect_dist_key_dx key = it_v.get();
+
+		size_t count = 0;
+
+		// Get the position of the particles
+		Point<dim,float> p = vd.getPos(key);
+
+		// Get the neighborhood of the particle
+		auto cell_it = NN.template getNNIterator<NO_CHECK>(NN.getCell(p));
+
+		while(cell_it.isNext())
+		{
+			//Next particle in a cell
+			++cell_it;
+			count++;
+		}
+
+		std::cout << "Count: " << count << std::endl;
+
+		//Next particle in cell list
+		++it_v;
+	}
+
+
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+
+#endif /* SRC_VECTOR_VECTOR_DIST_HDF5_CHCKPNT_RESTART_TEST_HPP_ */
diff --git a/src/main.cpp b/src/main.cpp
index c60b455f760aa5c64e8c240c5e273acc34046d6e..aac1c191694ca4f32dff177aec9f77143accf3d4 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -40,6 +40,8 @@ int main(int argc, char* argv[])
 #include "Decomposition/Distribution/metis_util_unit_test.hpp"
 #include "dec_optimizer_unit_test.hpp"
 #include "Vector/vector_dist_unit_test.hpp"
+#include "Vector/vector_dist_HDF5_chckpnt_restart_test.hpp"
+#include "Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp"
 #ifdef PERFORMANCE_TEST
 #include "pdata_performance.hpp"
 #endif