Save pramodk/8cf019caeb0b67e71860048f2998817f to your computer and use it in GitHub Desktop.
cacum.cpp: an example of NMODL-generated code for cacum.mod (NEURON simulator).
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters.
/*********************************************************
Model Name      : cacum
Filename        : cacum.mod
NMODL Version   : 6.2.0
Vectorized      : true
Threadsafe      : true
Created         : Sun Dec 17 18:25:47 2023
Backend         : C++-OpenAcc (api-compatibility)
NMODL Compiler  : 0.6 [6c89dc6 2023-11-22 08:34:22 +0100]
*********************************************************/
#include <math.h> | |
#include <stdio.h> | |
#include <stdlib.h> | |
#include <string.h> | |
#include <coreneuron/utils/offload.hpp> | |
//#include <cuda_runtime_api.h> | |
#include <coreneuron/gpu/nrn_acc_manager.hpp> | |
#include <coreneuron/mechanism/mech/mod2c_core_thread.hpp> | |
#include <coreneuron/mechanism/register_mech.hpp> | |
#include <coreneuron/nrnconf.h> | |
#include <coreneuron/nrniv/nrniv_decl.h> | |
#include <coreneuron/sim/multicore.hpp> | |
#include <coreneuron/sim/scopmath/newton_thread.hpp> | |
#include <coreneuron/utils/ivocvect.hpp> | |
#include <coreneuron/utils/nrnoc_aux.hpp> | |
//#include <coreneuron/utils/randoms/nrnran123.h> | |
#include <newton/newton.hpp> | |
namespace coreneuron {
/** constants used in nmodl from UNITS */
// Faraday constant in coulomb/mol, written as a hex float literal so the
// exact bit pattern round-trips: 0x1.78e555060882cp+16 == 96485.33212...
static const double FARADAY = 0x1.78e555060882cp+16;
// NRN_PRCELLSTATE guards the v_unused/g_unused debug stores in the kernels
// below; disabled (0) unless the build defines it.
#ifndef NRN_PRCELLSTATE
#define NRN_PRCELLSTATE 0
#endif
/** channel information */
// Registration table consumed by register_mech(). Layout follows the usual
// NMODL generator convention: [0] NMODL language version, [1] mechanism
// name, then parameter names, a 0 separator, assigned-variable names, a 0
// separator, state-variable names, and a final 0 terminator.
// NOTE(review): grouping inferred from typical generator output -- confirm
// against cacum.mod; the order here is a hard contract with ml->data rows.
static const char *mechanism[] = {
    "6.2.0",
    "cacum",
    "depth_cacum",
    "tau_cacum",
    "cai0_cacum",
    0,
    "cmax_cacum",
    "i_cacum",
    0,
    "cai_cacum",
    0,
    0
};
/** all global variables */
struct cacum_Store {
    int reset{};        // NOTE(review): not referenced in this file; presumably an init/reset flag -- confirm
    int mech_type{};    // type id from nrn_get_mechtype("cacum"), set during registration
    double irest{0};    // GLOBAL scalar irest (exposed to hoc as "irest_cacum"; used in the cai residual)
    int slist1[1]{5};   // state-variable rows in ml->data: cai lives in row 5
    int dlist1[1]{7};   // derivative rows in ml->data: Dcai lives in row 7
};
// The store is pushed to the device with a plain `update device` copy, so it
// must remain trivially copyable/movable/destructible.
static_assert(std::is_trivially_copy_constructible_v<cacum_Store>);
static_assert(std::is_trivially_move_constructible_v<cacum_Store>);
static_assert(std::is_trivially_copy_assignable_v<cacum_Store>);
static_assert(std::is_trivially_move_assignable_v<cacum_Store>);
static_assert(std::is_trivially_destructible_v<cacum_Store>);
// Single shared store; every cacum_Instance points at this object.
cacum_Store cacum_global;
/** all mechanism instance variables and global variables */
// Structure-of-arrays view over ml->data: each pointer addresses one row of
// pnodecount doubles (row assignments fixed in setup_instance).
struct cacum_Instance {
    const double* depth{};   // row 0: depth parameter
    const double* tau{};     // row 1: tau parameter
    const double* cai0{};    // row 2: initial cai value
    double* cmax{};          // row 3: running maximum of cai
    double* i{};             // row 4: mechanism current (always written 0 here)
    double* cai{};           // row 5: STATE cai (matches cacum_Store::slist1[0])
    double* ica{};           // row 6: calcium current read by the state update
    double* Dcai{};          // row 7: cai derivative slot (matches dlist1[0])
    double* v_unused{};      // row 8: debug copy of v (NRN_PRCELLSTATE only)
    double* g_unused{};      // row 9: debug copy of g (NRN_PRCELLSTATE only)
    cacum_Store* global{&cacum_global};  // shared global store
};
/** connect global (scalar) variables to hoc -- */
// Exposes cacum_global.irest to the interpreter under the name "irest_cacum".
static DoubScal hoc_scalar_double[] = {
    {"irest_cacum", &cacum_global.irest},
    {nullptr, nullptr}  // sentinel terminator
};
/** connect global (array) variables to hoc -- */
// cacum declares no global arrays, so the table holds only the terminator.
static DoubVec hoc_vector_double[] = {
    {nullptr, nullptr, 0}
};
/** Index of the first POINTER-style variable; -1 because cacum declares none. */
static inline int first_pointer_var_index() {
    constexpr int kNoPointerVariables = -1;
    return kNoPointerVariables;
}
/** Number of per-instance double rows in ml->data (rows 0-9: depth..g_unused). */
static inline int float_variables_size() {
    constexpr int kFloatVariableCount = 10;
    return kFloatVariableCount;
}
/** Number of per-instance integer (Datum) variables; cacum uses none. */
static inline int int_variables_size() {
    constexpr int kIntVariableCount = 0;
    return kIntVariableCount;
}
/** Mechanism type id assigned at registration time. */
static inline int get_mech_type() {
    // Written once into cacum_global by the registration function (_hh_reg).
    return cacum_global.mech_type;
}
/** Look up this mechanism's Memb_list on a thread, or nullptr when the
 *  thread carries no mechanism lists at all. */
static inline Memb_list* get_memb_list(NrnThread* nt) {
    return nt->_ml_list ? nt->_ml_list[get_mech_type()] : nullptr;
}
// Allocate a zero-initialized buffer of num*size bytes aligned to `alignment`.
// BUG FIX: the generated code only contained commented-out cudaMallocManaged /
// cudaMemset calls and returned an *uninitialized* pointer -- undefined
// behavior for any caller. Host-side aligned allocation restores a usable
// contract; the CUDA managed-memory path can be re-enabled for GPU builds.
// `alignment` must be a power of two and a multiple of sizeof(void*)
// (posix_memalign precondition); the default of 16 satisfies both.
static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) {
    //cudaMallocManaged(&ptr, num*size);
    //cudaMemset(ptr, 0, num*size);
    void* ptr = nullptr;
    if (posix_memalign(&ptr, alignment, num * size) != 0) {
        return nullptr;  // allocation failed
    }
    memset(ptr, 0, num * size);
    return ptr;
}
// Release a buffer obtained from mem_alloc (safe no-op for nullptr).
// BUG FIX: previously an empty stub (cudaFree commented out), so every
// allocation leaked; posix_memalign memory is released with free().
static inline void mem_free(void* ptr) {
    //cudaFree(ptr);
    free(ptr);
}
// Abort helper invoked when an OpenACC kernel hits an unrecoverable issue:
// prints a diagnostic and traps via assert.
// NOTE(review): assert compiles out under NDEBUG, in which case execution
// continues past this "abort" after the printf.
static inline void coreneuron_abort() {
    printf("Error : Issue while running OpenACC kernel \n");
    assert(0==1);
}
// Allocate instance structure
// Creates the cacum_Instance holder for one Memb_list and publishes it (plus
// the shared global store) on the Memb_list so later phases can retrieve it.
static void nrn_private_constructor_cacum(NrnThread* nt, Memb_list* ml, int type) {
    // Must be a fresh Memb_list: nothing may have been attached yet.
    assert(!ml->instance);
    assert(!ml->global_variables);
    assert(ml->global_variables_size == 0);
    auto* const inst = new cacum_Instance{};
    // A default-constructed instance always points at the shared store.
    assert(inst->global == &cacum_global);
    ml->instance = inst;
    ml->global_variables = inst->global;
    ml->global_variables_size = sizeof(cacum_Store);
}
static inline void copy_instance_to_device(NrnThread* nt, Memb_list* ml, cacum_Instance const* inst); | |
static inline void delete_instance_from_device(cacum_Instance* inst); | |
// Deallocate the instance structure
// Inverse of nrn_private_constructor_cacum: removes the device mirror, frees
// the host instance, and clears the Memb_list bookkeeping fields.
static void nrn_private_destructor_cacum(NrnThread* nt, Memb_list* ml, int type) {
    auto* const inst = static_cast<cacum_Instance*>(ml->instance);
    // Sanity checks: the Memb_list must still carry a fully wired instance.
    assert(inst);
    assert(inst->global);
    assert(inst->global == &cacum_global);
    assert(inst->global == ml->global_variables);
    assert(ml->global_variables_size == sizeof(cacum_Store));
    delete_instance_from_device(inst);
    delete inst;
    ml->instance = nullptr;
    ml->global_variables = nullptr;
    ml->global_variables_size = 0;
}
/** initialize mechanism instance variables */
// Wires each cacum_Instance pointer to its row in the SoA block ml->data
// (10 rows of pnodecount doubles each, matching float_variables_size()),
// then mirrors the instance on the device.
static inline void setup_instance(NrnThread* nt, Memb_list* ml) {
    auto* const inst = static_cast<cacum_Instance*>(ml->instance);
    assert(inst);
    assert(inst->global);
    assert(inst->global == &cacum_global);
    assert(inst->global == ml->global_variables);
    assert(ml->global_variables_size == sizeof(cacum_Store));
    int pnodecount = ml->_nodecount_padded;  // row stride (padded node count)
    Datum* indexes = ml->pdata;
    inst->depth = ml->data+0*pnodecount;
    inst->tau = ml->data+1*pnodecount;
    inst->cai0 = ml->data+2*pnodecount;
    inst->cmax = ml->data+3*pnodecount;
    inst->i = ml->data+4*pnodecount;
    inst->cai = ml->data+5*pnodecount;       // row 5 == cacum_global.slist1[0]
    inst->ica = ml->data+6*pnodecount;
    inst->Dcai = ml->data+7*pnodecount;      // row 7 == cacum_global.dlist1[0]
    inst->v_unused = ml->data+8*pnodecount;
    inst->g_unused = ml->data+9*pnodecount;
    copy_instance_to_device(nt, ml, inst);
}
// Mirror the host-side instance on the device: every pointer member of a
// host-local copy is rewritten to its device equivalent, the copy is then
// memcpy'd over the device-resident instance, and the device Memb_list is
// patched to reference it.
static inline void copy_instance_to_device(NrnThread* nt, Memb_list* ml, cacum_Instance const* inst) {
    // CPU-only run: the host instance is authoritative, nothing to mirror.
    if (!nt->compute_gpu) {
        return;
    }
    auto tmp = *inst;
    // Reuse an existing device allocation if present, otherwise copy one in.
    auto* d_inst = cnrn_target_is_present(inst);
    if (!d_inst) {
        d_inst = cnrn_target_copyin(inst);
    }
    // Translate each host pointer to the matching device address.
    tmp.global = cnrn_target_deviceptr(tmp.global);
    tmp.depth = cnrn_target_deviceptr(tmp.depth);
    tmp.tau = cnrn_target_deviceptr(tmp.tau);
    tmp.cai0 = cnrn_target_deviceptr(tmp.cai0);
    tmp.cmax = cnrn_target_deviceptr(tmp.cmax);
    tmp.i = cnrn_target_deviceptr(tmp.i);
    tmp.cai = cnrn_target_deviceptr(tmp.cai);
    tmp.ica = cnrn_target_deviceptr(tmp.ica);
    tmp.Dcai = cnrn_target_deviceptr(tmp.Dcai);
    tmp.v_unused = cnrn_target_deviceptr(tmp.v_unused);
    tmp.g_unused = cnrn_target_deviceptr(tmp.g_unused);
    cnrn_target_memcpy_to_device(d_inst, &tmp);
    // Point the device-side Memb_list::instance at the device instance so
    // GPU kernels can reach it.
    auto* d_ml = cnrn_target_deviceptr(ml);
    void* d_inst_void = d_inst;
    cnrn_target_memcpy_to_device(&(d_ml->instance), &d_inst_void);
}
// Remove the device mirror of the instance, if one was ever created.
static inline void delete_instance_from_device(cacum_Instance* inst) {
    if (!cnrn_target_is_present(inst)) {
        return;  // never copied to the device (e.g. CPU-only run)
    }
    cnrn_target_delete(inst);
}
// Per-instance allocation hook required by register_mech(); cacum needs no
// extra per-instance allocation, so this is intentionally empty.
static void nrn_alloc_cacum(double* data, Datum* indexes, int type) {
    // do nothing
}
// CONSTRUCTOR hook: cacum.mod has no CONSTRUCTOR block, so the locals below
// are set up but never used. The body is compiled only for the NEURON
// (non-CoreNEURON) build, hence the #ifndef guard.
void nrn_constructor_cacum(NrnThread* nt, Memb_list* ml, int type) {
#ifndef CORENEURON_BUILD
    int nodecount = ml->nodecount;
    int pnodecount = ml->_nodecount_padded;
    const int* node_index = ml->nodeindices;
    double* data = ml->data;
    const double* voltage = nt->_actual_v;
    Datum* indexes = ml->pdata;
    ThreadDatum* thread = ml->_thread;
    auto* const inst = static_cast<cacum_Instance*>(ml->instance);
#endif
}
// DESTRUCTOR hook: cacum.mod has no DESTRUCTOR block, so this mirrors the
// empty constructor above -- locals prepared but unused, NEURON build only.
void nrn_destructor_cacum(NrnThread* nt, Memb_list* ml, int type) {
#ifndef CORENEURON_BUILD
    int nodecount = ml->nodecount;
    int pnodecount = ml->_nodecount_padded;
    const int* node_index = ml->nodeindices;
    double* data = ml->data;
    const double* voltage = nt->_actual_v;
    Datum* indexes = ml->pdata;
    ThreadDatum* thread = ml->_thread;
    auto* const inst = static_cast<cacum_Instance*>(ml->instance);
#endif
}
/** initialize channel */ | |
void nrn_init_cacum(NrnThread* nt, Memb_list* ml, int type) { | |
nrn_pragma_acc(data present(nt, ml) if(nt->compute_gpu)) | |
{ | |
int nodecount = ml->nodecount; | |
int pnodecount = ml->_nodecount_padded; | |
const int* node_index = ml->nodeindices; | |
double* data = ml->data; | |
const double* voltage = nt->_actual_v; | |
Datum* indexes = ml->pdata; | |
ThreadDatum* thread = ml->_thread; | |
setup_instance(nt, ml); | |
auto* const inst = static_cast<cacum_Instance*>(ml->instance); | |
if (nt->compute_gpu) { | |
nrn_pragma_acc(update device (cacum_global)) | |
nrn_pragma_omp(target update to(cacum_global)) | |
} | |
if (_nrn_skip_initmodel == 0) { | |
nrn_pragma_acc(parallel loop present(inst, node_index, data, voltage, indexes, thread) async(nt->stream_id) if(nt->compute_gpu)) | |
nrn_pragma_omp(target teams distribute parallel for if(nt->compute_gpu)) | |
for (int id = 0; id < nodecount; id++) { | |
int node_id = node_index[id]; | |
double v = voltage[node_id]; | |
#if NRN_PRCELLSTATE | |
inst->v_unused[id] = v; | |
#endif | |
inst->cai[id] = inst->cai0[id]; | |
inst->cai[id] = inst->cai0[id]; | |
inst->cmax[id] = inst->cai[id]; | |
} | |
} | |
} | |
} | |
inline double nrn_current_cacum(int id, int pnodecount, cacum_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v) { | |
double current = 0.0; | |
if (inst->cai[id] > inst->cmax[id]) { | |
inst->cmax[id] = inst->cai[id]; | |
} | |
inst->i[id] = 0.0; | |
current += inst->i[id]; | |
return current; | |
} | |
/** update current */
// BREAKPOINT phase: evaluates the mechanism current at v+0.001 and at v to
// form a finite-difference conductance g = di/dv, then accumulates rhs and g
// into the solver vectors. For cacum the current is identically zero, so the
// numeric contribution is nil, but nrn_current_cacum still refreshes cmax.
void nrn_cur_cacum(NrnThread* nt, Memb_list* ml, int type) {
    nrn_pragma_acc(data present(nt, ml) if(nt->compute_gpu))
    {
        int nodecount = ml->nodecount;
        int pnodecount = ml->_nodecount_padded;
        const int* node_index = ml->nodeindices;
        double* data = ml->data;
        const double* voltage = nt->_actual_v;
        double* vec_rhs = nt->_actual_rhs;
        double* vec_d = nt->_actual_d;
        Datum* indexes = ml->pdata;
        ThreadDatum* thread = ml->_thread;
        auto* const inst = static_cast<cacum_Instance*>(ml->instance);
        nrn_pragma_acc(parallel loop present(inst, node_index, data, voltage, indexes, thread, vec_rhs, vec_d) async(nt->stream_id) if(nt->compute_gpu))
        nrn_pragma_omp(target teams distribute parallel for if(nt->compute_gpu))
        for (int id = 0; id < nodecount; id++) {
            int node_id = node_index[id];
            double v = voltage[node_id];
#if NRN_PRCELLSTATE
            inst->v_unused[id] = v;
#endif
            // Numerical derivative of current w.r.t. voltage (0.001 mV step).
            double g = nrn_current_cacum(id, pnodecount, inst, data, indexes, thread, nt, v+0.001);
            double rhs = nrn_current_cacum(id, pnodecount, inst, data, indexes, thread, nt, v);
            g = (g-rhs)/0.001;
#if NRN_PRCELLSTATE
            inst->g_unused[id] = g;
#endif
            // Accumulate into the matrix RHS and diagonal for this node.
            vec_rhs[node_id] -= rhs;
            vec_d[node_id] += g;
        }
    }
}
/** update state */
// SOLVE phase: advances the single STATE variable cai by one time step with
// an implicit (backward Euler) discretization, solved per node by Newton's
// method on a 1x1 system. Each node is independent, so the loop offloads
// cleanly to the GPU.
void nrn_state_cacum(NrnThread* nt, Memb_list* ml, int type) {
    nrn_pragma_acc(data present(nt, ml) if(nt->compute_gpu))
    {
        int nodecount = ml->nodecount;
        int pnodecount = ml->_nodecount_padded;
        const int* node_index = ml->nodeindices;
        double* data = ml->data;
        const double* voltage = nt->_actual_v;
        Datum* indexes = ml->pdata;
        ThreadDatum* thread = ml->_thread;
        auto* const inst = static_cast<cacum_Instance*>(ml->instance);
        nrn_pragma_acc(parallel loop present(inst, node_index, data, voltage, indexes, thread) async(nt->stream_id) if(nt->compute_gpu))
        nrn_pragma_omp(target teams distribute parallel for if(nt->compute_gpu))
        for (int id = 0; id < nodecount; id++) {
            int node_id = node_index[id];
            double v = voltage[node_id];
#if NRN_PRCELLSTATE
            inst->v_unused[id] = v;
#endif
            // Functor supplying the residual f(x) and its 1x1 Jacobian to the
            // Newton solver, where x is the candidate cai at t+dt.
            struct functor_cacum_0 {
                NrnThread* nt;
                cacum_Instance* inst;
                int id, pnodecount;
                double v;
                const Datum* indexes;
                double* data;
                ThreadDatum* thread;
                double old_cai;  // cai at the start of the step
                void initialize() {
                    old_cai = inst->cai[id];
                }
                functor_cacum_0(NrnThread* nt, cacum_Instance* inst, int id, int pnodecount, double v, const Datum* indexes, double* data, ThreadDatum* thread) : nt{nt}, inst{inst}, id{id}, pnodecount{pnodecount}, v{v}, indexes{indexes}, data{data}, thread{thread} {}
                void operator()(const Eigen::Matrix<double, 1, 1>& nmodl_eigen_xm, Eigen::Matrix<double, 1, 1>& nmodl_eigen_fm, Eigen::Matrix<double, 1, 1>& nmodl_eigen_jm) const {
                    const double* nmodl_eigen_x = nmodl_eigen_xm.data();
                    double* nmodl_eigen_j = nmodl_eigen_jm.data();
                    double* nmodl_eigen_f = nmodl_eigen_fm.data();
                    // Residual of the backward-Euler step; algebraically this is
                    //   x = old_cai + dt*((cai0 - x)/tau - (ica - irest)*5000/(FARADAY*depth))
                    // NOTE(review): reconstructed from the expression below --
                    // confirm against the DERIVATIVE block of cacum.mod.
                    nmodl_eigen_f[static_cast<int>(0)] = (FARADAY * inst->depth[id] * nt->_dt * ( -nmodl_eigen_x[static_cast<int>(0)] + inst->cai0[id]) + FARADAY * inst->depth[id] * inst->tau[id] * ( -nmodl_eigen_x[static_cast<int>(0)] + old_cai) + 5000.0 * nt->_dt * inst->tau[id] * ( -inst->ica[id] + inst->global->irest)) / (FARADAY * inst->depth[id] * inst->tau[id]);
                    // df/dx is constant: -(dt + tau)/tau.
                    nmodl_eigen_j[static_cast<int>(0)] = ( -nt->_dt - inst->tau[id]) / inst->tau[id];
                }
                void finalize() {
                }
            };
            Eigen::Matrix<double, 1, 1> nmodl_eigen_xm;
            double* nmodl_eigen_x = nmodl_eigen_xm.data();
            // Initial Newton guess: the current value of cai.
            nmodl_eigen_x[static_cast<int>(0)] = inst->cai[id];
            // call newton solver
            functor_cacum_0 newton_functor(nt, inst, id, pnodecount, v, indexes, data, thread);
            newton_functor.initialize();
            int newton_iterations = nmodl::newton::newton_solver(nmodl_eigen_xm, newton_functor);
            // A negative iteration count signals non-convergence; debug builds trap.
            if (newton_iterations < 0) assert(false && "Newton solver did not converge!");
            inst->cai[id] = nmodl_eigen_x[static_cast<int>(0)];
            newton_functor.finalize();
        }
    }
}
/** register channel with the simulator */
// NOTE(review): the entry point is named _hh_reg even though the mechanism is
// cacum; generated registrars are conventionally named _<mech>_reg. Renaming
// would break whatever loader references this symbol, so it is left as-is --
// confirm against the code generator / mechanism registry.
void _hh_reg() {
    int mech_type = nrn_get_mechtype("cacum");
    cacum_global.mech_type = mech_type;
    if (mech_type == -1) {
        // Mechanism unknown to the simulator: skip registration entirely.
        return;
    }
    _nrn_layout_reg(mech_type, 0);  // layout id 0 -- presumably SoA; confirm
    // Hooks: alloc, current, (no jacobian), state, init, private ctor/dtor;
    // final argument 1 is the thread-data count.
    register_mech(mechanism, nrn_alloc_cacum, nrn_cur_cacum, nullptr, nrn_state_cacum, nrn_init_cacum, nrn_private_constructor_cacum, nrn_private_destructor_cacum, first_pointer_var_index(), 1);
    hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size());
    hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL);
}
} |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.