Commit 8cd48566 authored by argupta's avatar argupta
Browse files

removes cuda_on_cpu file from git

parent 7a834ebe
Pipeline #3163 failed with stages
in 38 seconds
#include "VCluster.hpp"
#ifndef __CYGWIN__
#include <execinfo.h>
#include "util/print_stack.hpp"
#include "util/math_util_complex.hpp"
// Global heap-memory Vcluster singleton; lazily created by
// init_global_v_cluster_private() and destroyed by
// delete_global_v_cluster_private().
Vcluster<> * global_v_cluster_private_heap = NULL;
// Global CUDA-memory Vcluster singleton, created alongside the heap one.
Vcluster<CudaMemory> * global_v_cluster_private_cuda = NULL;
// Smallest-prime-factor sieve -- presumably filled by the "math
// pre-computation tables" step in openfpm_init_vcl(); the fill is not
// visible in this chunk, TODO confirm against util/math_util_complex.hpp.
std::vector<int> sieve_spf;
// number of vcluster instances
size_t n_vcluster = 0;
// Library lifecycle flag: set true by openfpm_init_vcl(), reset to false by
// openfpm_finalize().
bool ofp_initialized = false;
// Running send/receive totals -- NOTE(review): the code updating these is
// not visible in this chunk; verify units (bytes vs messages) at the callers.
size_t tot_sent = 0;
size_t tot_recv = 0;
//! NBX has a potential pitfall that must be addressed,
//! NBX Send all the messages and probe for incoming messages,
//! if there is an incoming message it receive it producing
//! an acknowledge notification on the sending processor.
//! When all the sends has been acknowledged, the processor call the MPI_Ibarrier
//! when all the processors call MPI_Ibarrier all send has been received.
//! While the processors are waiting for the MPI_Ibarrier to complete, all processors
//! are still probing for incoming message, Unfortunately some processor
//! can quit the MPI_Ibarrier before others and this mean that some
//! processor can exit the probing status before others, these processors can in theory
//! start new communications while the other processor are still in probing status producing
//! a wrong send/recv association to
//! resolve this problem an incremental NBX_cnt is used as message TAG to distinguish that the
//! messages come from other send or subsequent NBX procedures
size_t NBX_cnt = 0;
// Program name (argv[0]) captured by openfpm_init_vcl().
std::string program_name;
// Initialization mode requested by the caller; recorded by
// init_global_v_cluster_private() and consulted by openfpm_finalize().
init_options global_option;
#ifdef CUDA_GPU
#include "memory/CudaMemory.cuh"
// Scratch CUDA memory buffers shared across the library; the per-property
// arrays are sized by MAX_NUMER_OF_PROPERTIES [sic -- upstream spelling].
// NOTE(review): the matching #endif for this #ifdef CUDA_GPU is not visible
// in this copy -- the snippet appears truncated; verify against upstream.
CudaMemory mem_tmp;
CudaMemory rem_tmp;
CudaMemory rem_tmp2[MAX_NUMER_OF_PROPERTIES];
CudaMemory exp_tmp;
CudaMemory exp_tmp2[MAX_NUMER_OF_PROPERTIES];
// Segmentation fault signal handler
// Installed via sigaction(SIGSEGV, ...) in openfpm_init_vcl(); prints the
// signal number and, for SIGSEGV, the faulting address and sending pid from
// the siginfo_t payload.
// NOTE(review): the body's braces appear to have been lost in this copy --
// as written the lines below do not parse as a function body; compare with
// the upstream file before editing.
// NOTE(review): the literal "%p" inside the message looks like a leftover
// printf placeholder -- the address is already streamed via info->si_addr.
void bt_sighandler(int sig, siginfo_t * info, void * ctx_p)
if (sig == SIGSEGV)
std::cout << "Got signal " << sig << " faulty address is %p, " << info->si_addr << " from " << info->si_pid << std::endl;
std:: cout << "Got signal " << sig << std::endl;
// Accumulated time counter -- NOTE(review): no reads/writes of this global
// are visible in this chunk; confirm its users before removing.
double time_spent = 0.0;
/*! \brief Initialize a global instance of Runtime Virtual Cluster Machine
 * Initialize a global instance of Runtime Virtual Cluster Machine
 *
 * \param argc pointer to the argument count (forwarded to the Vcluster ctor)
 * \param argv pointer to the argument vector (forwarded to the Vcluster ctor)
 * \param option initialization mode; recorded in the file-scope
 *        global_option and later read by openfpm_finalize()
 *
 * NOTE(review): this copy appears to have lost the function's braces --
 * the statements below do not parse as written; compare with upstream.
 */
void init_global_v_cluster_private(int *argc, char ***argv, init_options option)
global_option = option;
// Communicator for the compute partition.
// NOTE(review): comm_compute is assigned only on the in_situ_visualization
// path but is passed to the Vcluster<> constructor unconditionally below --
// on the other paths it is used uninitialized; verify upstream handles this.
MPI_Comm comm_compute;
if (option == init_options::in_situ_visualization)
comm_compute = initialize_in_situ(argc, argv);
//PETSC initialize?
// Lazily create the two singletons; they are only allocated when still NULL,
// so repeated calls are idempotent.
if (global_v_cluster_private_heap == NULL)
{global_v_cluster_private_heap = new Vcluster<>(argc,argv,comm_compute);}
if (global_v_cluster_private_cuda == NULL)
{global_v_cluster_private_cuda = new Vcluster<CudaMemory>(argc,argv);}
void delete_global_v_cluster_private()
delete global_v_cluster_private_heap;
delete global_v_cluster_private_cuda;
/*! \brief Initialize the library
 * This function MUST be called before any other function
 *
 * \param argc pointer to the argument count (may be NULL)
 * \param argv pointer to the argument vector; argv[0] is recorded as the
 *        program name when argc is usable
 *
 * NOTE(review): this copy is heavily truncated -- the SE_CLASS*/CUDA_ON_CPU/
 * CUDA_GPU #ifdef blocks have no visible #endif, the function braces are
 * missing, and the trailing for-loop has no body. Compare with upstream
 * before editing; the comments below only describe what is visible.
 */
void openfpm_init_vcl(int *argc, char ***argv)
#ifdef SE_CLASS1
std::cout << "OpenFPM is compiled with debug mode LEVEL:1. Remember to remove SE_CLASS1 when you go in production" << std::endl;
#ifdef SE_CLASS2
std::cout << "OpenFPM is compiled with debug mode LEVEL:2. Remember to remove SE_CLASS2 when you go in production" << std::endl;
#ifdef SE_CLASS3
std::cout << "OpenFPM is compiled with debug mode LEVEL:3. Remember to remove SE_CLASS3 when you go in production" << std::endl;
#ifdef CUDA_ON_CPU
// install segmentation fault signal handler
// SA_RESTART restarts interruptible syscalls instead of failing with EINTR;
// sa_sigaction (not sa_handler) is set so the handler receives siginfo_t.
struct sigaction sa;
sa.sa_sigaction = bt_sighandler;
sa.sa_flags = SA_RESTART;
sigaction(SIGSEGV, &sa, NULL);
// Record argv[0] only when the argument vector is actually populated.
if (argc != NULL && *argc != 0)
{program_name = std::string(*argv[0]);}
// Initialize math pre-computation tables
ofp_initialized = true;
#ifdef CUDA_GPU
// Initialize temporal memory
// NOTE(review): the loop body (presumably sizing the per-property scratch
// buffers rem_tmp2/exp_tmp2) is not visible in this copy.
for (int i = 0 ; i < MAX_NUMER_OF_PROPERTIES ; i++)
/*! \brief Return a bit mask describing how this vcluster library was compiled
 *
 * Lets a caller (e.g. an application linking against the library) detect a
 * compilation-flag mismatch between the two sides.
 *
 * \return mask with bit 0 (0x1) set when compiled with CUDA_ON_CPU and
 *         bit 2 (0x4) set when compiled with CUDA_GPU; 0 when neither
 *         macro is defined
 */
size_t openfpm_vcluster_compilation_mask()
{
	size_t compiler_mask = 0;

	// This copy of the file had lost the function braces and both #endif
	// terminators, which made the translation unit ill-formed; restored here.
#ifdef CUDA_ON_CPU
	compiler_mask |= 0x1;
#endif

#ifdef CUDA_GPU
	compiler_mask |= 0x04;
#endif

	return compiler_mask;
}
/*! \brief Finalize the library
 * This function MUST be called at the end of the program
 *
 * NOTE(review): this copy is heavily truncated -- the function braces are
 * missing, the in_situ_visualization branch body is cut off after the
 * MPI_Request declaration, the CUDA_GPU #ifdef has no visible #endif, and
 * the release loop has no body. Compare with upstream before editing.
 */
void openfpm_finalize()
// Extra teardown for the in-situ visualization mode recorded at init time.
if (global_option == init_options::in_situ_visualization)
MPI_Request bar_req;
// Mark the library as no longer initialized (mirrors openfpm_init_vcl()).
ofp_initialized = false;
#ifdef CUDA_GPU
// Release memory
// NOTE(review): loop body (presumably destroying the per-property scratch
// buffers rem_tmp2/exp_tmp2) is not visible in this copy.
for (int i = 0 ; i < MAX_NUMER_OF_PROPERTIES ; i++)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment