Commit 2acc9117 authored by incardon

Adding SE_CLASS1 for GPU

parent 8eedc3b4
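
In short: with SE_CLASS1 defined, every get()/get_o()/set() on grid_gpu_ker now validates its key with check_bound(); an out-of-bound access records a report into global_cuda_error_array, and the new CUDA_LAUNCH macro reads that report back on the host after the kernel returns. A minimal usage sketch (the kernel write_one and the launch sizes are illustrative, not part of this commit):

// compile with SE_CLASS1 defined to enable the device-side checks
template<typename grid_type>
__global__ void write_one(grid_type g)
{
	grid_key_dx<2> k({0,0});
	g.template get<0>(k) = 1.0f;   // guarded by check_bound() under SE_CLASS1
}

size_t sz[2] = {5,5};
grid_gpu<2, aggregate<float>> g(sz);
g.setMemory();
dim3 wthr(1,1,1);
dim3 thr(1,1,1);

// instead of a raw write_one<<<wthr,thr>>>(g.toKernel())
CUDA_LAUNCH(write_one,wthr,thr,g.toKernel());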
@@ -72,11 +72,11 @@ struct grid_toKernelImpl
{
grid_gpu_ker<dim,T,memory_traits_lin> g(gc.getGrid());
g.data_.mem = gc.get_internal_data_().mem;
g.get_data_().mem = gc.get_internal_data_().mem;
// Increment the reference of mem
g.data_.mem->incRef();
g.data_.mem_r.bind_ref(gc.get_internal_data_().mem_r);
g.data_.switchToDevicePtr();
g.get_data_().mem->incRef();
g.get_data_().mem_r.bind_ref(gc.get_internal_data_().mem_r);
g.get_data_().switchToDevicePtr();
return g;
}
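
For context, this is the code path behind toKernel() in the tests below: the returned grid_gpu_ker shares the device memory of the host grid (hence the incRef()) and is passed to kernels by value. A minimal sketch, reusing names from the test further down (my_kernel, wthr and thr are placeholders):

size_t sz[2] = {5,5};
grid_gpu<2, aggregate<float,float[3],float[3][3]>> c3(sz);
c3.setMemory();
auto c3_ker = c3.toKernel();   // binds mem/mem_r and switches to device pointers
my_kernel<<<wthr,thr>>>(c3_ker);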
@@ -89,7 +89,7 @@ struct grid_toKernelImpl<true,dim,T>
{
grid_gpu_ker<dim,T,memory_traits_inte> g(gc.getGrid());
copy_switch_memory_c_no_cpy<typename std::remove_reference<decltype(gc.get_internal_data_())>::type,
typename std::remove_reference<decltype(g.data_)>::type> cp_mc(gc.get_internal_data_(),g.data_);
typename std::remove_reference<decltype(g.get_data_())>::type> cp_mc(gc.get_internal_data_(),g.get_data_());
boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp_mc);
@@ -12,6 +12,7 @@
#include "Point_test.hpp"
#include "Grid/grid_util_test.hpp"
#include "cuda_grid_unit_tests_func.cuh"
#include "util/cuda/cuda_launch.hpp"
BOOST_AUTO_TEST_SUITE( grid_gpu_func_test )
@@ -593,4 +594,91 @@ BOOST_AUTO_TEST_CASE (gpu_copy_device)
gpu_copy_device_test<1>();
}
template<typename grid_type>
__global__ void test_se1_crash_gt2(grid_type gt1, grid_type gt2)
{
int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p == 279)
{
grid_key_dx<2> k({10000,12345});
gt1.template get<1>(k)[2] = 6.0;
}
}
template<typename grid_type>
__global__ void test_se1_crash_gt3(grid_type gt1, grid_type gt2)
{
grid_key_dx<2> k({10000,12345});
gt1.template get<2>(k)[2][2] = 6.0;
}
BOOST_AUTO_TEST_CASE (gpu_grid_test_se_class1)
{
#ifdef SE_CLASS1
size_t sz[2] = {5,5};
grid_gpu<2, aggregate<float,float[3],float[3][3]> > c3(sz);
c3.setMemory();
grid_gpu<2, aggregate<float,float[3],float[3][3]> > c2(sz);
c2.setMemory();
int dev_mem[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
test_se1_crash_gt2<<<{32,1,1},{16,1,1}>>>(c3.toKernel(),c2.toKernel());
cudaDeviceSynchronize();
cudaMemcpyFromSymbol(dev_mem,global_cuda_error_array,sizeof(dev_mem));
BOOST_REQUIRE_EQUAL(dev_mem[0],1);
BOOST_REQUIRE_EQUAL(*(size_t *)(&dev_mem[1]),(size_t)(c3.toKernel().template getPointer<1>()));
BOOST_REQUIRE_EQUAL(dev_mem[3],1);
BOOST_REQUIRE_EQUAL(dev_mem[4],2);
BOOST_REQUIRE_EQUAL(dev_mem[5],10000);
BOOST_REQUIRE_EQUAL(dev_mem[6],12345);
BOOST_REQUIRE_EQUAL(dev_mem[7],17);
BOOST_REQUIRE_EQUAL(dev_mem[8],0);
BOOST_REQUIRE_EQUAL(dev_mem[9],0);
BOOST_REQUIRE_EQUAL(dev_mem[10],16);
BOOST_REQUIRE_EQUAL(dev_mem[11],1);
BOOST_REQUIRE_EQUAL(dev_mem[12],1);
BOOST_REQUIRE_EQUAL(dev_mem[13],7);
BOOST_REQUIRE_EQUAL(dev_mem[14],0);
BOOST_REQUIRE_EQUAL(dev_mem[15],0);
int dev_mem2[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
test_se1_crash_gt3<<<{32,1,1},{16,1,1}>>>(c2.toKernel(),c3.toKernel());
cudaDeviceSynchronize();
cudaMemcpyFromSymbol(dev_mem2,global_cuda_error_array,sizeof(dev_mem2));
BOOST_REQUIRE_EQUAL(dev_mem2[0],1);
BOOST_REQUIRE_EQUAL(*(size_t *)(&dev_mem2[1]),(size_t)(c2.toKernel().template getPointer<2>()));
BOOST_REQUIRE_EQUAL(dev_mem2[3],2);
BOOST_REQUIRE_EQUAL(dev_mem2[4],2);
std::cout << "######### Testing error message #########" << std::endl;
dim3 wthr;
wthr.x = 32;
wthr.y = 1;
wthr.z = 1;
dim3 thr;
thr.x = 16;
thr.y = 1;
thr.z = 1;
CUDA_LAUNCH(test_se1_crash_gt2,wthr,thr,c3.toKernel(),c2.toKernel());
std::cout << "######### End Testing error message #########" << std::endl;
#endif
}
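
A worked reading of the first block of assertions, following the record layout written by fill_grid_error_array_overflow (see below): dev_mem[0] is the error flag, dev_mem[1..2] hold the faulting pointer, dev_mem[3] the property (1), dev_mem[4] the dimensionality (2) and dev_mem[5..6] the key (10000,12345); the nine slots after the key hold blockIdx, blockDim and threadIdx. The kernel guards on p == 279 and the launch uses 16 threads per block, so

279 = 17 * 16 + 7

which is why blockIdx.x (dev_mem[7]) is expected to be 17, blockDim.x (dev_mem[10]) to be 16 and threadIdx.x (dev_mem[13]) to be 7.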
BOOST_AUTO_TEST_SUITE_END()
@@ -8,8 +8,12 @@
#ifndef MAP_GRID_CUDA_KER_HPP_
#define MAP_GRID_CUDA_KER_HPP_
#include "config.h"
#include "Grid/grid_base_impl_layout.hpp"
#include "util/tokernel_transformation.hpp"
#ifdef CUDA_GPU
#include "memory/CudaMemory.cuh"
#endif
/*! \brief this class is a functor for "for_each" algorithm
*
@@ -61,7 +65,7 @@ struct grid_gpu_ker_constructor_impl
{
template<typename ggk_type> static inline void construct(const ggk_type & cpy,ggk_type & this_)
{
copy_switch_memory_c_no_cpy<decltype(cpy.data_),decltype(this_.data_)> bp_mc(cpy.data_,this_.data_);
copy_switch_memory_c_no_cpy<decltype(cpy.get_data_()),decltype(this_.get_data_())> bp_mc(cpy.get_data_(),this_.get_data_());
boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(bp_mc);
}
@@ -72,21 +76,70 @@ struct grid_gpu_ker_constructor_impl<false,T>
{
template<typename ggk_type> static inline void construct(const ggk_type & cpy,ggk_type & this_)
{
this_.data_.mem = cpy.data_.mem;
this_.get_data_().mem = cpy.get_data_().mem;
// Increment the reference of mem
this_.data_.mem->incRef();
this_.data_.mem_r.bind_ref(cpy.data_.mem_r);
this_.data_.switchToDevicePtr();
this_.get_data_().mem->incRef();
this_.get_data_().mem_r.bind_ref(cpy.get_data_().mem_r);
this_.get_data_().switchToDevicePtr();
}
};
template<unsigned int dim, int prp, typename ids_type>
__device__ void fill_grid_error_array_overflow(const void * sptr,grid_key_dx<dim,ids_type> key)
{
#ifdef CUDA_GPU
int * ptr = (int *)&global_cuda_error_array[0];
ptr[0] = 1;
ptr[1] = ((size_t)sptr) & 0xFFFFFFFF;
ptr[2] = (((size_t)sptr) & 0xFFFFFFFF00000000) >> 32;
ptr[3] = prp;
ptr[4] = dim;
for (int i = 0 ; i < dim ; i++)
{ptr[i+5] = key.get(i);}
#ifdef __NVCC__
ptr[5+dim] = blockIdx.x;
ptr[6+dim] = blockIdx.y;
ptr[7+dim] = blockIdx.z;
ptr[8+dim] = blockDim.x;
ptr[9+dim] = blockDim.y;
ptr[10+dim] = blockDim.z;
ptr[11+dim] = threadIdx.x;
ptr[12+dim] = threadIdx.y;
ptr[13+dim] = threadIdx.z;
#endif
#endif
}
template<unsigned int dim>
__device__ void fill_grid_error_array(size_t lin_id)
{
#ifdef CUDA_GPU
int * ptr = (int *)&global_cuda_error_array[0];
ptr[0] = 1;
ptr[1] = 1;
ptr[2] = lin_id;
#endif
}
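
For reference, the record layout produced by the two functions above (the global_cuda_error_array symbol itself is assumed to be declared by the CUDA memory headers included elsewhere):

// slot             | content (fill_grid_error_array_overflow, dim-dimensional key)
// -----------------+--------------------------------------------------------------
// 0                | error flag (1 = error recorded)
// 1..2             | low/high 32 bits of the faulting pointer
// 3                | property index prp (-1 for whole-object accesses)
// 4                | dimensionality dim
// 5 .. 4+dim       | the offending key, one component per slot
// 5+dim .. 7+dim   | blockIdx.x/y/z
// 8+dim .. 10+dim  | blockDim.x/y/z
// 11+dim .. 13+dim | threadIdx.x/y/z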
/*! \brief grid interface available when on gpu
*
* \tparam dim dimensionality of the grid
* \tparam T aggregate type stored by the grid
* \tparam layout_base memory layout transformation
*
*/
template<unsigned int dim, typename T, template <typename> class layout_base>
struct grid_gpu_ker
class grid_gpu_ker
{
//! Type T
typedef typename apply_transform<layout_base,T>::type T_;
@@ -100,6 +153,47 @@ struct grid_gpu_ker
//! layout data
layout data_;
/*! \brief Check that the key is inside the grid
*
* \param v1 key to check
*
* \return true if the key is inside the grid
*
*/
template<typename ids_type> __device__ __host__ inline bool check_bound(const grid_key_dx<dim,ids_type> & v1) const
{
for (long int i = 0 ; i < dim ; i++)
{
if (v1.get(i) >= (long int)getGrid().size(i))
{return false;}
else if (v1.get(i) < 0)
{return false;}
}
return true;
}
/*! \brief Check that the key is inside the grid
*
* \param v1 linearized key to check
*
* \return true if it is bound
*
*/
__device__ __host__ inline bool check_bound(size_t v1) const
{
return v1 < getGrid().size();
}
public:
//! it defines that this structure is a grid
typedef int yes_i_am_grid;
//! Type of the value the grid is storing
typedef T value_type;
__device__ __host__ grid_gpu_ker()
{}
@@ -136,6 +230,11 @@ struct grid_gpu_ker
template <unsigned int p, typename ids_type,typename r_type=decltype(mem_get<p,layout_base<T_>,layout,grid_sm<dim,T_>,grid_key_dx<dim>>::get(data_,g1,grid_key_dx<dim>()))>
__device__ __host__ inline r_type get(const grid_key_dx<dim,ids_type> & v1)
{
#ifdef SE_CLASS1
if (check_bound(v1) == false)
{fill_grid_error_array_overflow<dim,p>(this->template getPointer<p>(),v1);}
#endif
return mem_get<p,layout_base<T_>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(data_,g1,v1);
}
@@ -149,6 +248,10 @@ struct grid_gpu_ker
template <unsigned int p, typename ids_type, typename r_type=decltype(mem_get<p,layout_base<T_>,layout,grid_sm<dim,T_>,grid_key_dx<dim>>::get_c(data_,g1,grid_key_dx<dim>()))>
__device__ __host__ inline const r_type get(const grid_key_dx<dim,ids_type> & v1) const
{
#ifdef SE_CLASS1
if (check_bound(v1) == false)
{fill_grid_error_array_overflow<dim,p>(this->template getPointer<p>(),v1);}
#endif
return mem_get<p,layout_base<T_>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get_c(data_,g1,v1);
}
@@ -162,6 +265,10 @@ struct grid_gpu_ker
template <unsigned int p, typename r_type=decltype(mem_get<p,layout_base<T_>,layout,grid_sm<dim,T_>,grid_key_dx<dim>>::get_lin(data_,g1,0))>
__device__ __host__ inline r_type get(const size_t lin_id)
{
#ifdef SE_CLASS1
if (check_bound(lin_id) == false)
{fill_grid_error_array_overflow<p>(this->template getPointer<p>(),lin_id);}
#endif
return mem_get<p,layout_base<T_>,decltype(this->data_),decltype(this->g1),grid_key_dx<dim>>::get_lin(data_,g1,lin_id);
}
@@ -175,6 +282,10 @@ struct grid_gpu_ker
template <unsigned int p, typename r_type=decltype(mem_get<p,layout_base<T_>,layout,grid_sm<dim,T_>,grid_key_dx<dim>>::get_lin(data_,g1,0))>
__device__ __host__ inline const r_type get(size_t lin_id) const
{
#ifdef SE_CLASS1
if (check_bound(lin_id) == false)
{fill_grid_error_array_overflow<p>(this->template getPointer<p>(),lin_id);}
#endif
return mem_get<p,layout_base<T_>,decltype(this->data_),decltype(this->g1),grid_key_dx<dim>>::get_lin(data_,g1,lin_id);
}
@@ -191,6 +302,10 @@ struct grid_gpu_ker
*/
__device__ inline encapc<dim,T_,layout> get_o(const grid_key_dx<dim> & v1)
{
#ifdef SE_CLASS1
if (check_bound(v1) == false)
{fill_grid_error_array_overflow<dim,-1>(this->template getPointer<0>(),v1);}
#endif
return mem_geto<dim,T_,layout_base<T_>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(data_,g1,v1);
}
@@ -207,12 +322,25 @@ struct grid_gpu_ker
*/
__device__ inline const encapc<dim,T_,layout> get_o(const grid_key_dx<dim> & v1) const
{
#ifdef SE_CLASS1
if (check_bound(v1) == false)
{fill_grid_error_array_overflow<dim,-1>(this->template getPointer<0>(),v1);}
#endif
return mem_geto<dim,T,layout_base<T_>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(const_cast<decltype(this->data_) &>(data_),g1,v1);
}
__device__ inline void set(const grid_key_dx<dim> & key1,const grid_gpu_ker<dim,T_,layout_base> & g, const grid_key_dx<dim> & key2)
{
#ifdef SE_CLASS1
if (check_bound(key1) == false)
{fill_grid_error_array_overflow<dim,-1>(this->template getPointer<0>(),key1);}
if (g.check_bound(key2) == false)
{fill_grid_error_array_overflow<dim,-1>(g.template getPointer<0>(),key2);}
#endif
T_ tmp;
copy_encap_vector_fusion<decltype(g.get_o(key2)),typename T_::type> cp(g.get_o(key2),tmp.data);
@@ -223,6 +351,15 @@ struct grid_gpu_ker
template<unsigned int ... prp> __device__ inline void set(const grid_key_dx<dim> & key1,const grid_gpu_ker<dim,T_,layout_base> & g, const grid_key_dx<dim> & key2)
{
#ifdef SE_CLASS1
if (check_bound(key1) == false)
{fill_grid_error_array_overflow<dim,-1>(this->template getPointer<0>(),key1);}
if (g.check_bound(key2) == false)
{fill_grid_error_array_overflow<dim,-1>(g.template getPointer<0>(),key2);}
#endif
auto edest = this->get_o(key1);
copy_cpu_encap_encap_prp<decltype(g.get_o(key2)),decltype(this->get_o(key1)),prp...> ec(g.get_o(key2),edest);
@@ -240,6 +377,11 @@ struct grid_gpu_ker
*/
template<typename Memory> __device__ inline void set(grid_key_dx<dim> key1, const encapc<1,T,Memory> & obj)
{
#ifdef SE_CLASS1
if (check_bound(key1) == false)
{fill_grid_error_array_overflow<dim,-1>(this->template getPointer<0>(),key1);}
#endif
this->get_o(key1) = obj;
}
@@ -248,7 +390,17 @@ struct grid_gpu_ker
* \tparam property p
*
*/
template<unsigned int p> __device__ void * getPointer()
template<unsigned int p> __device__ __host__ void * getPointer()
{
return mem_getpointer<decltype(data_),layout_base<T>>::template getPointer<p>(data_);
}
/*! \brief Get the pointer for the property p
*
* \tparam property p
*
*/
template<unsigned int p> __device__ __host__ const void * getPointer() const
{
return mem_getpointer<decltype(data_),layout_base<T>>::template getPointer<p>(data_);
}
@@ -266,6 +418,26 @@ struct grid_gpu_ker
return *this;
}
/*! \brief Get the internal data_ structure
*
* \return the data_ structure
*
*/
inline layout & get_data_()
{
return data_;
}
/*! \brief Get the internal data_ structure
*
* \return the data_ structure
*
*/
inline const layout & get_data_() const
{
return data_;
}
};
@@ -284,6 +284,11 @@ struct mem_getpointer
{
return data_.mem_r.get_pointer();
}
template<unsigned int d> __device__ __host__ static const void * getPointer(const data_type & data_)
{
return data_.mem_r.get_pointer();
}
};
template<typename data_type, typename layout>
@@ -293,6 +298,11 @@ struct mem_getpointer<data_type,layout,1>
{
return boost::fusion::at_c<p>(data_).mem_r.get_pointer();
}
template<unsigned int p> __device__ __host__ static const void * getPointer(const data_type & data_)
{
return boost::fusion::at_c<p>(data_).mem_r.get_pointer();
}
};
template<typename data_type, typename Mem_type, typename layout, unsigned int sel = 2*is_layout_mlin<layout>::value + is_layout_inte<layout>::value>
@@ -237,7 +237,7 @@ namespace openfpm
* \tparam property p
*
*/
template<unsigned int p> __device__ void * getPointer()
template<unsigned int p> __device__ __host__ void * getPointer()
{
//! copy the element
return base.template getPointer<p>();
@@ -37,6 +37,7 @@
#include "data_type/aggregate.hpp"
#include "vector_map_iterator.hpp"
#include "util/cuda_util.hpp"
#include "util/cuda/cuda_launch.hpp"
#include "cuda/map_vector_cuda_ker.cuh"
namespace openfpm
@@ -141,7 +141,7 @@ struct copy_encap_vector_fusion
* this means that the object passed must not be a temporary object
*
*/
inline copy_fusion_vector_encap(const enc && src, bfv && dst)
__device__ __host__ inline copy_encap_vector_fusion(const enc && src, bfv && dst)
:src(src),dst(dst)
{std::cerr << "Error: " <<__FILE__ << ":" << __LINE__ << " Passing a temporal object\n";};
#endif
/*
* se_class1_cuda.hpp
*
* Created on: Jan 13, 2019
* Author: i-bird
*/
#ifndef SE_CLASS1_CUDA_HPP_
#define SE_CLASS1_CUDA_HPP_
#include "Grid/util.hpp"
#include "Vector/util.hpp"
template<typename T, int type_of_t=2*is_grid<T>::value+1*is_vector<T>::value>
struct check_type
{
static int check(void * ptr, int prp, T & arg)
{
return false;
}
};
/*! \brief this class is a functor for "for_each" algorithm
*
* This class is a functor for the "for_each" algorithm. It checks whether
* the pointer ptr matches the pointer of one of the properties
*
*/
template<typename data_type>
struct check_device_ptr
{
//! pointer to check
void * ptr;
//! Data to check
data_type & data;
//! property to check
int prp;
//! true if ptr matches one of the property pointers (false if none matched)
mutable bool result = false;
/*! \brief constructor
*
* \param ptr pointer to check
* \param prp property index to check
* \param data data structure
*
*/
inline check_device_ptr(void * ptr, int prp, data_type & data)
:ptr(ptr),data(data),prp(prp)
{
};
//! It compares the pointer of each property against ptr
template<typename T>
inline void operator()(T& t)
{
if (T::value == prp)
{
result = data.template getPointer<T::value>() == ptr;
}
}
};
template<typename T>
struct check_type<T,1>
{
static int check(void * ptr, int prp, T & arg)
{
check_device_ptr<T> cp(ptr,prp,arg);
boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::value_type::max_prop> >(cp);
return cp.result;
}
};
template<typename T>
struct check_type<T,2>
{
static int check(void * ptr, int prp, T & arg)
{
check_device_ptr<T> cp(ptr,prp,arg);
boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::value_type::max_prop> >(cp);
return cp.result;
}
};
template<typename ArgL>
int error_args_impl(void * ptr, int prp, ArgL argl)
{
if (check_type<ArgL>::check(ptr,prp,argl) == true)
{
return 0;
}
return -1;
}
template<typename ArgL, typename ... Args>
int error_args_impl(void * ptr, int prp, ArgL argl, Args ... args)
{
if (check_type<ArgL>::check(ptr,prp,argl) == true)
{
return sizeof...(args);
}
return error_args_impl(ptr, prp, args ...);
}
template<typename ... Args>int error_arg(void * ptr, int prp, Args ... args)
{
int pos = error_args_impl(ptr, prp, args ... );
return sizeof...(args) - pos - 1;
}
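
error_args_impl scans the kernel arguments left to right; on a match it returns the number of arguments that follow the matching one, and error_arg turns that into a zero-based position. A short worked trace (g1, g2, g3 stand for hypothetical kernel arguments, with the faulting pointer belonging to g2):

// error_args_impl(ptr, prp, g1, g2, g3):
//   g1 -> no match, recurse on (g2, g3)
//   g2 -> match, return sizeof...(remaining) = 1
// error_arg: sizeof...(args) - pos - 1 = 3 - 1 - 1 = 1
int ea = error_arg(ptr, prp, g1, g2, g3);   // ea == 1, i.e. the second argument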
#include <boost/algorithm/string.hpp>
#ifdef SE_CLASS1
// dev_mem must hold the full error record, up to slot 13+dim (see fill_grid_error_array_overflow)
#define CHECK_SE_CLASS1_PRE int dev_mem[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
//#define CHECK_SE_CLASS1_POST(...) cudaMemcpyFromSymbol(dev_mem,global_cuda_error_array,sizeof(dev_mem));
#define CHECK_SE_CLASS1_POST(kernel_call,...) cudaMemcpyFromSymbol(dev_mem,global_cuda_error_array,sizeof(dev_mem)); \
if (dev_mem[0] != 0)\
{\
void * ptr = (void *)*(size_t *)&dev_mem[1]; \
int prp_err = dev_mem[3];\
int ea = error_arg(ptr,prp_err,__VA_ARGS__);\
std::string args_s( #__VA_ARGS__ );\
std::vector<std::string> results;\
boost::split(results, args_s, [](char c){return c == ',';});\
std::cout << __FILE__ << ":" << __LINE__ << " Overflow detected in Kernel: " << kernel_call << " from the structure " << results[ea] << " property: " << prp_err << " index:(" ;\
int i = 0; \
for ( ; i < dev_mem[4]-1 ; i++)\
{\
std::cout << dev_mem[5+i] << ",";\
}\
std::cout << dev_mem[5+i];\
std::cout << ")";\
std::cout << " thread: " << "(" << dev_mem[6+i] << "," << dev_mem[7+i] << "," << dev_mem[8+i] << ")*(" << dev_mem[9+i] << "," << dev_mem[10+i] << "," << dev_mem[11+i] << ")+(" << dev_mem[12+i] << "," << dev_mem[13+i] << "," << dev_mem[14+i] << ")" << std::endl;\
}
#else
#define CHECK_SE_CLASS1_PRE
#define CHECK_SE_CLASS1_POST(kernel_call,...)
#endif
#endif /* SE_CLASS1_CUDA_HPP_ */
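
A sketch of how the two macros bracket a launch; this is essentially what CUDA_LAUNCH in cuda_launch.hpp expands to (my_kernel, wthr, thr and the two grids are placeholders):

CHECK_SE_CLASS1_PRE                 // declares the host-side dev_mem buffer
my_kernel<<<wthr,thr>>>(g1.toKernel(),g2.toKernel());
cudaDeviceSynchronize();
CHECK_SE_CLASS1_POST("my_kernel",g1.toKernel(),g2.toKernel())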
/*
* cuda_util.hpp
* cuda_launch.hpp
*
* Created on: Jun 13, 2018
* Created on: Jan 14, 2019
* Author: i-bird
*/
#ifndef OPENFPM_DATA_SRC_UTIL_CUDA_UTIL_HPP_
#define OPENFPM_DATA_SRC_UTIL_CUDA_UTIL_HPP_
#include "config.h"
#ifdef CUDA_GPU
#include <cuda_runtime.h>
#endif
#ifndef CUDA_LAUNCH_HPP_
#define CUDA_LAUNCH_HPP_
#ifdef CUDA_GPU
#if defined(SE_CLASS1) || defined(CUDA_CHECK_LAUNCH)
#include "cuda_kernel_error_checker.hpp"
#define CUDA_LAUNCH(cuda_call,grid_size,block_size, ...) \
{\
CHECK_SE_CLASS1_PRE\
cuda_call<<<(grid_size),(block_size)>>>(__VA_ARGS__); \
cudaDeviceSynchronize(); \
{\
CHECK_SE_CLASS1_POST(#cuda_call,__VA_ARGS__)\
cudaError_t e = cudaGetLastError();\
if (e != cudaSuccess)\
{\
std::string error = cudaGetErrorString(e);\
std::cout << "Cuda Error in: " << __FILE__ << ":" << __LINE__ << " " << error << std::endl;\
}\
}\
}
#else
#define CUDA_LAUNCH(cuda_call,grid_size,block_size, ...) \
cuda_call<<<(grid_size),(block_size)>>>(__VA_ARGS__);
#endif
#include "util/cuda/ofp_context.hxx"
#ifndef __NVCC__
#else
#ifndef __host__
#define __host__
#define __device__
#endif
#define CUDA_SAFE(cuda_call) \
cuda_call; \
{\
cudaError_t e = cudaPeekAtLastError();\
if (e != cudaSuccess)\
{\
std::string error = cudaGetErrorString(e);\
std::cout << "Cuda Error in: " << __FILE__ << ":" << __LINE__ << " " << error << std::endl;\
}\
}
#endif
#else
#ifndef __host__
#define __host__
#define __device__
#endif
#endif
#endif /* OPENFPM_DATA_SRC_UTIL_CUDA_UTIL_HPP_ */
#endif /* CUDA_LAUNCH_HPP_ */
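
With this header a checked launch is a single line; when neither SE_CLASS1 nor CUDA_CHECK_LAUNCH is defined the same line degrades to a plain <<<...>>> launch. Example taken from the test above:

dim3 wthr(32,1,1);
dim3 thr(16,1,1);
CUDA_LAUNCH(test_se1_crash_gt2,wthr,thr,c3.toKernel(),c2.toKernel());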
@@ -361,9 +361,9 @@ public:
this->base_ = static_cast<T *>(base);
}
/* \brief Set the internal pointer
/* \brief Get the internal pointer
*
* \param base internal pointer