Created
October 5, 2019 22:47
-
-
Save knkumar/5fa1d255f508308aaafb731ffda057f3 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
1: | |
2: // includes from the plugin | |
3: // [[Rcpp::plugins(cpp14)]] | |
4: | |
5: // user includes | |
6: #define STAN__SERVICES__COMMAND_HPP// Code generated by Stan version 2.19.1 | |
7: | |
8: #include <stan/model/model_header.hpp> | |
9: | |
10: namespace model3bf038e124f5_att_model1_namespace { | |
11: | |
12: using std::istream; | |
13: using std::string; | |
14: using std::stringstream; | |
15: using std::vector; | |
16: using stan::io::dump; | |
17: using stan::math::lgamma; | |
18: using stan::model::prob_grad; | |
19: using namespace stan::math; | |
20: | |
21: static int current_statement_begin__; | |
22: | |
23: stan::io::program_reader prog_reader__() { | |
24: stan::io::program_reader reader; | |
25: reader.add_event(0, 0, "start", "model3bf038e124f5_att_model1"); | |
26: reader.add_event(176, 174, "end", "model3bf038e124f5_att_model1"); | |
27: return reader; | |
28: } | |
29: | |
30: template <typename T0__, typename T1__, typename T2__, typename T3__> | |
31: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__>::type | |
32: wt_integrand(const T0__& t, | |
33: const T1__& tc, | |
34: const std::vector<T2__>& theta, | |
35: const std::vector<T3__>& x_r, | |
36: const std::vector<int>& x_i, std::ostream* pstream__) { | |
37: typedef typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__>::type local_scalar_t__; | |
38: typedef local_scalar_t__ fun_return_scalar_t__; | |
39: const static bool propto__ = true; | |
40: (void) propto__; | |
41: local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
42: (void) DUMMY_VAR__; // suppress unused var warning | |
43: | |
44: int current_statement_begin__ = -1; | |
45: try { | |
46: { | |
47: current_statement_begin__ = 5; | |
48: local_scalar_t__ it_a(DUMMY_VAR__); | |
49: (void) it_a; // dummy to suppress unused var warning | |
50: stan::math::initialize(it_a, DUMMY_VAR__); | |
51: stan::math::fill(it_a, DUMMY_VAR__); | |
52: stan::math::assign(it_a,get_base1(theta, 1, "theta", 1)); | |
53: | |
54: current_statement_begin__ = 6; | |
55: local_scalar_t__ it_b(DUMMY_VAR__); | |
56: (void) it_b; // dummy to suppress unused var warning | |
57: stan::math::initialize(it_b, DUMMY_VAR__); | |
58: stan::math::fill(it_b, DUMMY_VAR__); | |
59: stan::math::assign(it_b,get_base1(theta, 2, "theta", 1)); | |
60: | |
61: current_statement_begin__ = 7; | |
62: local_scalar_t__ ot_a(DUMMY_VAR__); | |
63: (void) ot_a; // dummy to suppress unused var warning | |
64: stan::math::initialize(ot_a, DUMMY_VAR__); | |
65: stan::math::fill(ot_a, DUMMY_VAR__); | |
66: stan::math::assign(ot_a,get_base1(theta, 3, "theta", 1)); | |
67: | |
68: current_statement_begin__ = 8; | |
69: local_scalar_t__ ot_b(DUMMY_VAR__); | |
70: (void) ot_b; // dummy to suppress unused var warning | |
71: stan::math::initialize(ot_b, DUMMY_VAR__); | |
72: stan::math::fill(ot_b, DUMMY_VAR__); | |
73: stan::math::assign(ot_b,get_base1(theta, 4, "theta", 1)); | |
74: | |
75: current_statement_begin__ = 9; | |
76: local_scalar_t__ T(DUMMY_VAR__); | |
77: (void) T; // dummy to suppress unused var warning | |
78: stan::math::initialize(T, DUMMY_VAR__); | |
79: stan::math::fill(T, DUMMY_VAR__); | |
80: stan::math::assign(T,get_base1(theta, 5, "theta", 1)); | |
81: | |
82: current_statement_begin__ = 10; | |
83: local_scalar_t__ p_c(DUMMY_VAR__); | |
84: (void) p_c; // dummy to suppress unused var warning | |
85: stan::math::initialize(p_c, DUMMY_VAR__); | |
86: stan::math::fill(p_c, DUMMY_VAR__); | |
87: stan::math::assign(p_c,get_base1(theta, 6, "theta", 1)); | |
88: | |
89: current_statement_begin__ = 11; | |
90: local_scalar_t__ p_d(DUMMY_VAR__); | |
91: (void) p_d; // dummy to suppress unused var warning | |
92: stan::math::initialize(p_d, DUMMY_VAR__); | |
93: stan::math::fill(p_d, DUMMY_VAR__); | |
94: stan::math::assign(p_d,get_base1(theta, 7, "theta", 1)); | |
95: | |
96: current_statement_begin__ = 12; | |
97: local_scalar_t__ likelihood(DUMMY_VAR__); | |
98: (void) likelihood; // dummy to suppress unused var warning | |
99: stan::math::initialize(likelihood, DUMMY_VAR__); | |
100: stan::math::fill(likelihood, DUMMY_VAR__); | |
101: | |
102: current_statement_begin__ = 13; | |
103: local_scalar_t__ I(DUMMY_VAR__); | |
104: (void) I; // dummy to suppress unused var warning | |
105: stan::math::initialize(I, DUMMY_VAR__); | |
106: stan::math::fill(I, DUMMY_VAR__); | |
107: | |
108: current_statement_begin__ = 14; | |
109: local_scalar_t__ o(DUMMY_VAR__); | |
110: (void) o; // dummy to suppress unused var warning | |
111: stan::math::initialize(o, DUMMY_VAR__); | |
112: stan::math::fill(o, DUMMY_VAR__); | |
113: | |
114: | |
115: current_statement_begin__ = 15; | |
116: stan::math::assign(I, gamma_cdf_log((T - t), it_a, it_b)); | |
117: current_statement_begin__ = 16; | |
118: stan::math::assign(o, gamma_log(t, ot_a, ot_b)); | |
119: current_statement_begin__ = 17; | |
120: stan::math::assign(likelihood, (((o + p_c) + log_diff_exp(0, p_c)) + log_sum_exp(log_diff_exp(0, I), (I + log_diff_exp(0, p_d))))); | |
121: current_statement_begin__ = 18; | |
122: return stan::math::promote_scalar<fun_return_scalar_t__>(likelihood); | |
123: } | |
124: } catch (const std::exception& e) { | |
125: stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
126: // Next line prevents compiler griping about no return | |
127: throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
128: } | |
129: } | |
130: | |
131: | |
132: struct wt_integrand_functor__ { | |
133: template <typename T0__, typename T1__, typename T2__, typename T3__> | |
134: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__>::type | |
135: operator()(const T0__& t, | |
136: const T1__& tc, | |
137: const std::vector<T2__>& theta, | |
138: const std::vector<T3__>& x_r, | |
139: const std::vector<int>& x_i, std::ostream* pstream__) const { | |
140: return wt_integrand(t, tc, theta, x_r, x_i, pstream__); | |
141: } | |
142: }; | |
143: | |
144: template <typename T0__, typename T1__, typename T2__, typename T3__, typename T4__, typename T5__, typename T6__> | |
145: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__, typename boost::math::tools::promote_args<T4__, T5__, T6__>::type>::type | |
146: wt_prob(const T0__& T, | |
147: const T1__& it_a, | |
148: const T2__& it_b, | |
149: const T3__& ot_a, | |
150: const T4__& ot_b, | |
151: const T5__& c, | |
152: const T6__& d, std::ostream* pstream__) { | |
153: typedef typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__, typename boost::math::tools::promote_args<T4__, T5__, T6__>::type>::type local_scalar_t__; | |
154: typedef local_scalar_t__ fun_return_scalar_t__; | |
155: const static bool propto__ = true; | |
156: (void) propto__; | |
157: local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
158: (void) DUMMY_VAR__; // suppress unused var warning | |
159: | |
160: int current_statement_begin__ = -1; | |
161: try { | |
162: { | |
163: current_statement_begin__ = 22; | |
164: local_scalar_t__ NO(DUMMY_VAR__); | |
165: (void) NO; // dummy to suppress unused var warning | |
166: stan::math::initialize(NO, DUMMY_VAR__); | |
167: stan::math::fill(NO, DUMMY_VAR__); | |
168: stan::math::assign(NO,log_diff_exp(0, gamma_cdf_log(T, ot_a, ot_b))); | |
169: | |
170: | |
171: current_statement_begin__ = 23; | |
172: return stan::math::promote_scalar<fun_return_scalar_t__>(log_sum_exp(NO, integrate_1d(wt_integrand_functor__(), 0, T, static_cast<std::vector<local_scalar_t__> >(stan::math::array_builder<local_scalar_t__ >().add(it_a).add(it_b).add(ot_a).add(ot_b).add(T).add(c).add(d).array()), static_cast<std::vector<local_scalar_t__> >(stan::math::array_builder<local_scalar_t__ >().add(1.0).array()), static_cast<std::vector<int> >(stan::math::array_builder<int >().add(1).array()), *pstream__, 0.001))); | |
173: } | |
174: } catch (const std::exception& e) { | |
175: stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
176: // Next line prevents compiler griping about no return | |
177: throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
178: } | |
179: } | |
180: | |
181: | |
182: struct wt_prob_functor__ { | |
183: template <typename T0__, typename T1__, typename T2__, typename T3__, typename T4__, typename T5__, typename T6__> | |
184: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__, typename boost::math::tools::promote_args<T4__, T5__, T6__>::type>::type | |
185: operator()(const T0__& T, | |
186: const T1__& it_a, | |
187: const T2__& it_b, | |
188: const T3__& ot_a, | |
189: const T4__& ot_b, | |
190: const T5__& c, | |
191: const T6__& d, std::ostream* pstream__) const { | |
192: return wt_prob(T, it_a, it_b, ot_a, ot_b, c, d, pstream__); | |
193: } | |
194: }; | |
195: | |
196: template <typename T0__, typename T1__, typename T2__, typename T3__> | |
197: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__>::type | |
198: yt_integrand(const T0__& t, | |
199: const T1__& tc, | |
200: const std::vector<T2__>& theta, | |
201: const std::vector<T3__>& x_r, | |
202: const std::vector<int>& x_i, std::ostream* pstream__) { | |
203: typedef typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__>::type local_scalar_t__; | |
204: typedef local_scalar_t__ fun_return_scalar_t__; | |
205: const static bool propto__ = true; | |
206: (void) propto__; | |
207: local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
208: (void) DUMMY_VAR__; // suppress unused var warning | |
209: | |
210: int current_statement_begin__ = -1; | |
211: try { | |
212: { | |
213: current_statement_begin__ = 27; | |
214: local_scalar_t__ it_a(DUMMY_VAR__); | |
215: (void) it_a; // dummy to suppress unused var warning | |
216: stan::math::initialize(it_a, DUMMY_VAR__); | |
217: stan::math::fill(it_a, DUMMY_VAR__); | |
218: stan::math::assign(it_a,get_base1(theta, 1, "theta", 1)); | |
219: | |
220: current_statement_begin__ = 28; | |
221: local_scalar_t__ it_b(DUMMY_VAR__); | |
222: (void) it_b; // dummy to suppress unused var warning | |
223: stan::math::initialize(it_b, DUMMY_VAR__); | |
224: stan::math::fill(it_b, DUMMY_VAR__); | |
225: stan::math::assign(it_b,get_base1(theta, 2, "theta", 1)); | |
226: | |
227: current_statement_begin__ = 29; | |
228: local_scalar_t__ ot_a(DUMMY_VAR__); | |
229: (void) ot_a; // dummy to suppress unused var warning | |
230: stan::math::initialize(ot_a, DUMMY_VAR__); | |
231: stan::math::fill(ot_a, DUMMY_VAR__); | |
232: stan::math::assign(ot_a,get_base1(theta, 3, "theta", 1)); | |
233: | |
234: current_statement_begin__ = 30; | |
235: local_scalar_t__ ot_b(DUMMY_VAR__); | |
236: (void) ot_b; // dummy to suppress unused var warning | |
237: stan::math::initialize(ot_b, DUMMY_VAR__); | |
238: stan::math::fill(ot_b, DUMMY_VAR__); | |
239: stan::math::assign(ot_b,get_base1(theta, 4, "theta", 1)); | |
240: | |
241: current_statement_begin__ = 31; | |
242: local_scalar_t__ t_star(DUMMY_VAR__); | |
243: (void) t_star; // dummy to suppress unused var warning | |
244: stan::math::initialize(t_star, DUMMY_VAR__); | |
245: stan::math::fill(t_star, DUMMY_VAR__); | |
246: stan::math::assign(t_star,get_base1(theta, 5, "theta", 1)); | |
247: | |
248: current_statement_begin__ = 32; | |
249: local_scalar_t__ p_c(DUMMY_VAR__); | |
250: (void) p_c; // dummy to suppress unused var warning | |
251: stan::math::initialize(p_c, DUMMY_VAR__); | |
252: stan::math::fill(p_c, DUMMY_VAR__); | |
253: stan::math::assign(p_c,get_base1(theta, 6, "theta", 1)); | |
254: | |
255: current_statement_begin__ = 33; | |
256: local_scalar_t__ p_d(DUMMY_VAR__); | |
257: (void) p_d; // dummy to suppress unused var warning | |
258: stan::math::initialize(p_d, DUMMY_VAR__); | |
259: stan::math::fill(p_d, DUMMY_VAR__); | |
260: stan::math::assign(p_d,get_base1(theta, 7, "theta", 1)); | |
261: | |
262: current_statement_begin__ = 34; | |
263: local_scalar_t__ likelihood(DUMMY_VAR__); | |
264: (void) likelihood; // dummy to suppress unused var warning | |
265: stan::math::initialize(likelihood, DUMMY_VAR__); | |
266: stan::math::fill(likelihood, DUMMY_VAR__); | |
267: | |
268: | |
269: current_statement_begin__ = 36; | |
270: stan::math::assign(likelihood, (((gamma_log(t, ot_a, ot_b) + log_sum_exp(0, p_c)) + gamma_log((t_star - t), it_a, it_b)) + p_d)); | |
271: current_statement_begin__ = 37; | |
272: return stan::math::promote_scalar<fun_return_scalar_t__>(likelihood); | |
273: } | |
274: } catch (const std::exception& e) { | |
275: stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
276: // Next line prevents compiler griping about no return | |
277: throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
278: } | |
279: } | |
280: | |
281: | |
282: struct yt_integrand_functor__ { | |
283: template <typename T0__, typename T1__, typename T2__, typename T3__> | |
284: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__>::type | |
285: operator()(const T0__& t, | |
286: const T1__& tc, | |
287: const std::vector<T2__>& theta, | |
288: const std::vector<T3__>& x_r, | |
289: const std::vector<int>& x_i, std::ostream* pstream__) const { | |
290: return yt_integrand(t, tc, theta, x_r, x_i, pstream__); | |
291: } | |
292: }; | |
293: | |
294: template <typename T0__, typename T1__, typename T2__, typename T3__, typename T4__, typename T5__, typename T6__, typename T7__> | |
295: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__, typename boost::math::tools::promote_args<T4__, T5__, T6__, T7__>::type>::type | |
296: yt_prob(const T0__& t_star, | |
297: const T1__& it_a, | |
298: const T2__& it_b, | |
299: const T3__& ot_a, | |
300: const T4__& ot_b, | |
301: const T5__& T, | |
302: const T6__& c, | |
303: const T7__& d, std::ostream* pstream__) { | |
304: typedef typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__, typename boost::math::tools::promote_args<T4__, T5__, T6__, T7__>::type>::type local_scalar_t__; | |
305: typedef local_scalar_t__ fun_return_scalar_t__; | |
306: const static bool propto__ = true; | |
307: (void) propto__; | |
308: local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
309: (void) DUMMY_VAR__; // suppress unused var warning | |
310: | |
311: int current_statement_begin__ = -1; | |
312: try { | |
313: { | |
314: current_statement_begin__ = 41; | |
315: local_scalar_t__ ON(DUMMY_VAR__); | |
316: (void) ON; // dummy to suppress unused var warning | |
317: stan::math::initialize(ON, DUMMY_VAR__); | |
318: stan::math::fill(ON, DUMMY_VAR__); | |
319: stan::math::assign(ON,(gamma_log(t_star, ot_a, ot_b) + c)); | |
320: | |
321: | |
322: current_statement_begin__ = 42; | |
323: return stan::math::promote_scalar<fun_return_scalar_t__>(log_sum_exp(ON, integrate_1d(yt_integrand_functor__(), 0, T, static_cast<std::vector<local_scalar_t__> >(stan::math::array_builder<local_scalar_t__ >().add(it_a).add(it_b).add(ot_a).add(ot_b).add(t_star).add(c).add(d).array()), static_cast<std::vector<local_scalar_t__> >(stan::math::array_builder<local_scalar_t__ >().add(1.0).array()), static_cast<std::vector<int> >(stan::math::array_builder<int >().add(1).array()), *pstream__, 0.001))); | |
324: } | |
325: } catch (const std::exception& e) { | |
326: stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
327: // Next line prevents compiler griping about no return | |
328: throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
329: } | |
330: } | |
331: | |
332: | |
333: struct yt_prob_functor__ { | |
334: template <typename T0__, typename T1__, typename T2__, typename T3__, typename T4__, typename T5__, typename T6__, typename T7__> | |
335: typename boost::math::tools::promote_args<T0__, T1__, T2__, T3__, typename boost::math::tools::promote_args<T4__, T5__, T6__, T7__>::type>::type | |
336: operator()(const T0__& t_star, | |
337: const T1__& it_a, | |
338: const T2__& it_b, | |
339: const T3__& ot_a, | |
340: const T4__& ot_b, | |
341: const T5__& T, | |
342: const T6__& c, | |
343: const T7__& d, std::ostream* pstream__) const { | |
344: return yt_prob(t_star, it_a, it_b, ot_a, ot_b, T, c, d, pstream__); | |
345: } | |
346: }; | |
347: | |
// Compiled Stan model class (machine-generated by Stan 2.19.1).
// prob_grad is the Stan base class that tracks num_params_r__ and
// param_ranges_i__ for the unconstrained parameter space.
348: class model3bf038e124f5_att_model1 : public prob_grad { | |
349: private: | |
// ---- data block (populated by ctor_body from the var_context) ----
// Observation counts.  The N_<A>_<B>_<C> naming suggests per-condition
// transition counts — presumably (group, from-state, to-state);
// TODO confirm against the original Stan model's data block.
350: int N_T_NR_T; | |
351: int N_T_NR_F; | |
352: int N_T_F_F; | |
353: int N_T_T_F; | |
354: int N_T_F_T; | |
355: int N_F_NR_F; | |
356: int N_F_NR_N; | |
357: int N_F_N_F; | |
358: int N_F_N_N; | |
// Number of columns shared by every data matrix below.
359: int C; | |
// Per-category data matrices; each is (corresponding count x C).
360: matrix_d T_no_T; | |
361: matrix_d T_no_F; | |
362: matrix_d T_F_to_F; | |
363: matrix_d T_T_to_F; | |
364: matrix_d T_F_to_T; | |
365: matrix_d F_no_F; | |
366: matrix_d F_no_N; | |
367: matrix_d F_N_to_F; | |
368: matrix_d F_N_to_N; | |
369: public: | |
// Convenience constructor: reads the data block from context__ using a
// fixed RNG seed of 0.  Delegates all work to ctor_body.
370: model3bf038e124f5_att_model1(stan::io::var_context& context__, | |
371: std::ostream* pstream__ = 0) | |
372: : prob_grad(0) { | |
373: ctor_body(context__, 0, pstream__); | |
374: } | |
375: | |
// Constructor with an explicit RNG seed (the form used by the
// stan::services entry points).  Delegates all work to ctor_body.
376: model3bf038e124f5_att_model1(stan::io::var_context& context__, | |
377: unsigned int random_seed__, | |
378: std::ostream* pstream__ = 0) | |
379: : prob_grad(0) { | |
380: ctor_body(context__, random_seed__, pstream__); | |
381: } | |
382: | |
383: void ctor_body(stan::io::var_context& context__, | |
384: unsigned int random_seed__, | |
385: std::ostream* pstream__) { | |
386: typedef double local_scalar_t__; | |
387: | |
388: boost::ecuyer1988 base_rng__ = | |
389: stan::services::util::create_rng(random_seed__, 0); | |
390: (void) base_rng__; // suppress unused var warning | |
391: | |
392: current_statement_begin__ = -1; | |
393: | |
394: static const char* function__ = "model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1"; | |
395: (void) function__; // dummy to suppress unused var warning | |
396: size_t pos__; | |
397: (void) pos__; // dummy to suppress unused var warning | |
398: std::vector<int> vals_i__; | |
399: std::vector<double> vals_r__; | |
400: local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
401: (void) DUMMY_VAR__; // suppress unused var warning | |
402: | |
403: try { | |
404: // initialize data block variables from context__ | |
405: current_statement_begin__ = 48; | |
406: context__.validate_dims("data initialization", "N_T_NR_T", "int", context__.to_vec()); | |
407: N_T_NR_T = int(0); | |
408: vals_i__ = context__.vals_i("N_T_NR_T"); | |
409: pos__ = 0; | |
410: N_T_NR_T = vals_i__[pos__++]; | |
411: | |
412: current_statement_begin__ = 49; | |
413: context__.validate_dims("data initialization", "N_T_NR_F", "int", context__.to_vec()); | |
414: N_T_NR_F = int(0); | |
415: vals_i__ = context__.vals_i("N_T_NR_F"); | |
416: pos__ = 0; | |
417: N_T_NR_F = vals_i__[pos__++]; | |
418: | |
419: current_statement_begin__ = 50; | |
420: context__.validate_dims("data initialization", "N_T_F_F", "int", context__.to_vec()); | |
421: N_T_F_F = int(0); | |
422: vals_i__ = context__.vals_i("N_T_F_F"); | |
423: pos__ = 0; | |
424: N_T_F_F = vals_i__[pos__++]; | |
425: | |
426: current_statement_begin__ = 51; | |
427: context__.validate_dims("data initialization", "N_T_T_F", "int", context__.to_vec()); | |
428: N_T_T_F = int(0); | |
429: vals_i__ = context__.vals_i("N_T_T_F"); | |
430: pos__ = 0; | |
431: N_T_T_F = vals_i__[pos__++]; | |
432: | |
433: current_statement_begin__ = 52; | |
434: context__.validate_dims("data initialization", "N_T_F_T", "int", context__.to_vec()); | |
435: N_T_F_T = int(0); | |
436: vals_i__ = context__.vals_i("N_T_F_T"); | |
437: pos__ = 0; | |
438: N_T_F_T = vals_i__[pos__++]; | |
439: | |
440: current_statement_begin__ = 54; | |
441: context__.validate_dims("data initialization", "N_F_NR_F", "int", context__.to_vec()); | |
442: N_F_NR_F = int(0); | |
443: vals_i__ = context__.vals_i("N_F_NR_F"); | |
444: pos__ = 0; | |
445: N_F_NR_F = vals_i__[pos__++]; | |
446: | |
447: current_statement_begin__ = 55; | |
448: context__.validate_dims("data initialization", "N_F_NR_N", "int", context__.to_vec()); | |
449: N_F_NR_N = int(0); | |
450: vals_i__ = context__.vals_i("N_F_NR_N"); | |
451: pos__ = 0; | |
452: N_F_NR_N = vals_i__[pos__++]; | |
453: | |
454: current_statement_begin__ = 56; | |
455: context__.validate_dims("data initialization", "N_F_N_F", "int", context__.to_vec()); | |
456: N_F_N_F = int(0); | |
457: vals_i__ = context__.vals_i("N_F_N_F"); | |
458: pos__ = 0; | |
459: N_F_N_F = vals_i__[pos__++]; | |
460: | |
461: current_statement_begin__ = 57; | |
462: context__.validate_dims("data initialization", "N_F_N_N", "int", context__.to_vec()); | |
463: N_F_N_N = int(0); | |
464: vals_i__ = context__.vals_i("N_F_N_N"); | |
465: pos__ = 0; | |
466: N_F_N_N = vals_i__[pos__++]; | |
467: | |
468: current_statement_begin__ = 58; | |
469: context__.validate_dims("data initialization", "C", "int", context__.to_vec()); | |
470: C = int(0); | |
471: vals_i__ = context__.vals_i("C"); | |
472: pos__ = 0; | |
473: C = vals_i__[pos__++]; | |
474: | |
475: current_statement_begin__ = 60; | |
476: validate_non_negative_index("T_no_T", "N_T_NR_T", N_T_NR_T); | |
477: validate_non_negative_index("T_no_T", "C", C); | |
478: context__.validate_dims("data initialization", "T_no_T", "matrix_d", context__.to_vec(N_T_NR_T,C)); | |
479: T_no_T = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_T_NR_T, C); | |
480: vals_r__ = context__.vals_r("T_no_T"); | |
481: pos__ = 0; | |
482: size_t T_no_T_j_2_max__ = C; | |
483: size_t T_no_T_j_1_max__ = N_T_NR_T; | |
484: for (size_t j_2__ = 0; j_2__ < T_no_T_j_2_max__; ++j_2__) { | |
485: for (size_t j_1__ = 0; j_1__ < T_no_T_j_1_max__; ++j_1__) { | |
486: T_no_T(j_1__, j_2__) = vals_r__[pos__++]; | |
487: } | |
488: } | |
489: | |
490: current_statement_begin__ = 61; | |
491: validate_non_negative_index("T_no_F", "N_T_NR_F", N_T_NR_F); | |
492: validate_non_negative_index("T_no_F", "C", C); | |
493: context__.validate_dims("data initialization", "T_no_F", "matrix_d", context__.to_vec(N_T_NR_F,C)); | |
494: T_no_F = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_T_NR_F, C); | |
495: vals_r__ = context__.vals_r("T_no_F"); | |
496: pos__ = 0; | |
497: size_t T_no_F_j_2_max__ = C; | |
498: size_t T_no_F_j_1_max__ = N_T_NR_F; | |
499: for (size_t j_2__ = 0; j_2__ < T_no_F_j_2_max__; ++j_2__) { | |
500: for (size_t j_1__ = 0; j_1__ < T_no_F_j_1_max__; ++j_1__) { | |
501: T_no_F(j_1__, j_2__) = vals_r__[pos__++]; | |
502: } | |
503: } | |
504: | |
505: current_statement_begin__ = 62; | |
506: validate_non_negative_index("T_F_to_F", "N_T_F_F", N_T_F_F); | |
507: validate_non_negative_index("T_F_to_F", "C", C); | |
508: context__.validate_dims("data initialization", "T_F_to_F", "matrix_d", context__.to_vec(N_T_F_F,C)); | |
509: T_F_to_F = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_T_F_F, C); | |
510: vals_r__ = context__.vals_r("T_F_to_F"); | |
511: pos__ = 0; | |
512: size_t T_F_to_F_j_2_max__ = C; | |
513: size_t T_F_to_F_j_1_max__ = N_T_F_F; | |
514: for (size_t j_2__ = 0; j_2__ < T_F_to_F_j_2_max__; ++j_2__) { | |
515: for (size_t j_1__ = 0; j_1__ < T_F_to_F_j_1_max__; ++j_1__) { | |
516: T_F_to_F(j_1__, j_2__) = vals_r__[pos__++]; | |
517: } | |
518: } | |
519: | |
520: current_statement_begin__ = 63; | |
521: validate_non_negative_index("T_T_to_F", "N_T_T_F", N_T_T_F); | |
522: validate_non_negative_index("T_T_to_F", "C", C); | |
523: context__.validate_dims("data initialization", "T_T_to_F", "matrix_d", context__.to_vec(N_T_T_F,C)); | |
524: T_T_to_F = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_T_T_F, C); | |
525: vals_r__ = context__.vals_r("T_T_to_F"); | |
526: pos__ = 0; | |
527: size_t T_T_to_F_j_2_max__ = C; | |
528: size_t T_T_to_F_j_1_max__ = N_T_T_F; | |
529: for (size_t j_2__ = 0; j_2__ < T_T_to_F_j_2_max__; ++j_2__) { | |
530: for (size_t j_1__ = 0; j_1__ < T_T_to_F_j_1_max__; ++j_1__) { | |
531: T_T_to_F(j_1__, j_2__) = vals_r__[pos__++]; | |
532: } | |
533: } | |
534: | |
535: current_statement_begin__ = 64; | |
536: validate_non_negative_index("T_F_to_T", "N_T_F_T", N_T_F_T); | |
537: validate_non_negative_index("T_F_to_T", "C", C); | |
538: context__.validate_dims("data initialization", "T_F_to_T", "matrix_d", context__.to_vec(N_T_F_T,C)); | |
539: T_F_to_T = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_T_F_T, C); | |
540: vals_r__ = context__.vals_r("T_F_to_T"); | |
541: pos__ = 0; | |
542: size_t T_F_to_T_j_2_max__ = C; | |
543: size_t T_F_to_T_j_1_max__ = N_T_F_T; | |
544: for (size_t j_2__ = 0; j_2__ < T_F_to_T_j_2_max__; ++j_2__) { | |
545: for (size_t j_1__ = 0; j_1__ < T_F_to_T_j_1_max__; ++j_1__) { | |
546: T_F_to_T(j_1__, j_2__) = vals_r__[pos__++]; | |
547: } | |
548: } | |
549: | |
550: current_statement_begin__ = 66; | |
551: validate_non_negative_index("F_no_F", "N_F_NR_F", N_F_NR_F); | |
552: validate_non_negative_index("F_no_F", "C", C); | |
553: context__.validate_dims("data initialization", "F_no_F", "matrix_d", context__.to_vec(N_F_NR_F,C)); | |
554: F_no_F = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_F_NR_F, C); | |
555: vals_r__ = context__.vals_r("F_no_F"); | |
556: pos__ = 0; | |
557: size_t F_no_F_j_2_max__ = C; | |
558: size_t F_no_F_j_1_max__ = N_F_NR_F; | |
559: for (size_t j_2__ = 0; j_2__ < F_no_F_j_2_max__; ++j_2__) { | |
560: for (size_t j_1__ = 0; j_1__ < F_no_F_j_1_max__; ++j_1__) { | |
561: F_no_F(j_1__, j_2__) = vals_r__[pos__++]; | |
562: } | |
563: } | |
564: | |
565: current_statement_begin__ = 67; | |
566: validate_non_negative_index("F_no_N", "N_F_NR_N", N_F_NR_N); | |
567: validate_non_negative_index("F_no_N", "C", C); | |
568: context__.validate_dims("data initialization", "F_no_N", "matrix_d", context__.to_vec(N_F_NR_N,C)); | |
569: F_no_N = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_F_NR_N, C); | |
570: vals_r__ = context__.vals_r("F_no_N"); | |
571: pos__ = 0; | |
572: size_t F_no_N_j_2_max__ = C; | |
573: size_t F_no_N_j_1_max__ = N_F_NR_N; | |
574: for (size_t j_2__ = 0; j_2__ < F_no_N_j_2_max__; ++j_2__) { | |
575: for (size_t j_1__ = 0; j_1__ < F_no_N_j_1_max__; ++j_1__) { | |
576: F_no_N(j_1__, j_2__) = vals_r__[pos__++]; | |
577: } | |
578: } | |
579: | |
580: current_statement_begin__ = 68; | |
581: validate_non_negative_index("F_N_to_F", "N_F_N_F", N_F_N_F); | |
582: validate_non_negative_index("F_N_to_F", "C", C); | |
583: context__.validate_dims("data initialization", "F_N_to_F", "matrix_d", context__.to_vec(N_F_N_F,C)); | |
584: F_N_to_F = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_F_N_F, C); | |
585: vals_r__ = context__.vals_r("F_N_to_F"); | |
586: pos__ = 0; | |
587: size_t F_N_to_F_j_2_max__ = C; | |
588: size_t F_N_to_F_j_1_max__ = N_F_N_F; | |
589: for (size_t j_2__ = 0; j_2__ < F_N_to_F_j_2_max__; ++j_2__) { | |
590: for (size_t j_1__ = 0; j_1__ < F_N_to_F_j_1_max__; ++j_1__) { | |
591: F_N_to_F(j_1__, j_2__) = vals_r__[pos__++]; | |
592: } | |
593: } | |
594: | |
595: current_statement_begin__ = 69; | |
596: validate_non_negative_index("F_N_to_N", "N_F_N_N", N_F_N_N); | |
597: validate_non_negative_index("F_N_to_N", "C", C); | |
598: context__.validate_dims("data initialization", "F_N_to_N", "matrix_d", context__.to_vec(N_F_N_N,C)); | |
599: F_N_to_N = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>(N_F_N_N, C); | |
600: vals_r__ = context__.vals_r("F_N_to_N"); | |
601: pos__ = 0; | |
602: size_t F_N_to_N_j_2_max__ = C; | |
603: size_t F_N_to_N_j_1_max__ = N_F_N_N; | |
604: for (size_t j_2__ = 0; j_2__ < F_N_to_N_j_2_max__; ++j_2__) { | |
605: for (size_t j_1__ = 0; j_1__ < F_N_to_N_j_1_max__; ++j_1__) { | |
606: F_N_to_N(j_1__, j_2__) = vals_r__[pos__++]; | |
607: } | |
608: } | |
609: | |
610: | |
611: // initialize transformed data variables | |
612: // execute transformed data statements | |
613: | |
614: // validate transformed data | |
615: | |
616: // validate, set parameter ranges | |
617: num_params_r__ = 0U; | |
618: param_ranges_i__.clear(); | |
619: current_statement_begin__ = 74; | |
620: num_params_r__ += 1; | |
621: current_statement_begin__ = 75; | |
622: num_params_r__ += 1; | |
623: current_statement_begin__ = 76; | |
624: num_params_r__ += 1; | |
625: current_statement_begin__ = 79; | |
626: num_params_r__ += 1; | |
627: current_statement_begin__ = 80; | |
628: num_params_r__ += 1; | |
629: current_statement_begin__ = 81; | |
630: num_params_r__ += 1; | |
631: current_statement_begin__ = 84; | |
632: num_params_r__ += 1; | |
633: current_statement_begin__ = 85; | |
634: num_params_r__ += 1; | |
635: current_statement_begin__ = 86; | |
636: num_params_r__ += 1; | |
637: } catch (const std::exception& e) { | |
638: stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
639: // Next line prevents compiler griping about no return | |
640: throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
641: } | |
642: } | |
643: | |
// Trivial destructor: all members are value types, nothing to release.
644: ~model3bf038e124f5_att_model1() { } | |
645: | |
646: | |
647: void transform_inits(const stan::io::var_context& context__, | |
648: std::vector<int>& params_i__, | |
649: std::vector<double>& params_r__, | |
650: std::ostream* pstream__) const { | |
651: typedef double local_scalar_t__; | |
652: stan::io::writer<double> writer__(params_r__, params_i__); | |
653: size_t pos__; | |
654: (void) pos__; // dummy call to supress warning | |
655: std::vector<double> vals_r__; | |
656: std::vector<int> vals_i__; | |
657: | |
658: current_statement_begin__ = 74; | |
659: if (!(context__.contains_r("gt_a"))) | |
660: stan::lang::rethrow_located(std::runtime_error(std::string("Variable gt_a missing")), current_statement_begin__, prog_reader__()); | |
661: vals_r__ = context__.vals_r("gt_a"); | |
662: pos__ = 0U; | |
663: context__.validate_dims("parameter initialization", "gt_a", "double", context__.to_vec()); | |
664: double gt_a(0); | |
665: gt_a = vals_r__[pos__++]; | |
666: try { | |
667: writer__.scalar_lub_unconstrain(0, 200, gt_a); | |
668: } catch (const std::exception& e) { | |
669: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gt_a: ") + e.what()), current_statement_begin__, prog_reader__()); | |
670: } | |
671: | |
672: current_statement_begin__ = 75; | |
673: if (!(context__.contains_r("gt_b"))) | |
674: stan::lang::rethrow_located(std::runtime_error(std::string("Variable gt_b missing")), current_statement_begin__, prog_reader__()); | |
675: vals_r__ = context__.vals_r("gt_b"); | |
676: pos__ = 0U; | |
677: context__.validate_dims("parameter initialization", "gt_b", "double", context__.to_vec()); | |
678: double gt_b(0); | |
679: gt_b = vals_r__[pos__++]; | |
680: try { | |
681: writer__.scalar_lub_unconstrain(0, 2, gt_b); | |
682: } catch (const std::exception& e) { | |
683: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gt_b: ") + e.what()), current_statement_begin__, prog_reader__()); | |
684: } | |
685: | |
686: current_statement_begin__ = 76; | |
687: if (!(context__.contains_r("p_e"))) | |
688: stan::lang::rethrow_located(std::runtime_error(std::string("Variable p_e missing")), current_statement_begin__, prog_reader__()); | |
689: vals_r__ = context__.vals_r("p_e"); | |
690: pos__ = 0U; | |
691: context__.validate_dims("parameter initialization", "p_e", "double", context__.to_vec()); | |
692: double p_e(0); | |
693: p_e = vals_r__[pos__++]; | |
694: try { | |
695: writer__.scalar_lub_unconstrain(0, 1, p_e); | |
696: } catch (const std::exception& e) { | |
697: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable p_e: ") + e.what()), current_statement_begin__, prog_reader__()); | |
698: } | |
699: | |
700: current_statement_begin__ = 79; | |
701: if (!(context__.contains_r("ot_a"))) | |
702: stan::lang::rethrow_located(std::runtime_error(std::string("Variable ot_a missing")), current_statement_begin__, prog_reader__()); | |
703: vals_r__ = context__.vals_r("ot_a"); | |
704: pos__ = 0U; | |
705: context__.validate_dims("parameter initialization", "ot_a", "double", context__.to_vec()); | |
706: double ot_a(0); | |
707: ot_a = vals_r__[pos__++]; | |
708: try { | |
709: writer__.scalar_lub_unconstrain(0, 200, ot_a); | |
710: } catch (const std::exception& e) { | |
711: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable ot_a: ") + e.what()), current_statement_begin__, prog_reader__()); | |
712: } | |
713: | |
714: current_statement_begin__ = 80; | |
715: if (!(context__.contains_r("ot_b"))) | |
716: stan::lang::rethrow_located(std::runtime_error(std::string("Variable ot_b missing")), current_statement_begin__, prog_reader__()); | |
717: vals_r__ = context__.vals_r("ot_b"); | |
718: pos__ = 0U; | |
719: context__.validate_dims("parameter initialization", "ot_b", "double", context__.to_vec()); | |
720: double ot_b(0); | |
721: ot_b = vals_r__[pos__++]; | |
722: try { | |
723: writer__.scalar_lub_unconstrain(0, 2, ot_b); | |
724: } catch (const std::exception& e) { | |
725: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable ot_b: ") + e.what()), current_statement_begin__, prog_reader__()); | |
726: } | |
727: | |
728: current_statement_begin__ = 81; | |
729: if (!(context__.contains_r("p_c"))) | |
730: stan::lang::rethrow_located(std::runtime_error(std::string("Variable p_c missing")), current_statement_begin__, prog_reader__()); | |
731: vals_r__ = context__.vals_r("p_c"); | |
732: pos__ = 0U; | |
733: context__.validate_dims("parameter initialization", "p_c", "double", context__.to_vec()); | |
734: double p_c(0); | |
735: p_c = vals_r__[pos__++]; | |
736: try { | |
737: writer__.scalar_lub_unconstrain(0, 1, p_c); | |
738: } catch (const std::exception& e) { | |
739: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable p_c: ") + e.what()), current_statement_begin__, prog_reader__()); | |
740: } | |
741: | |
742: current_statement_begin__ = 84; | |
743: if (!(context__.contains_r("it_a"))) | |
744: stan::lang::rethrow_located(std::runtime_error(std::string("Variable it_a missing")), current_statement_begin__, prog_reader__()); | |
745: vals_r__ = context__.vals_r("it_a"); | |
746: pos__ = 0U; | |
747: context__.validate_dims("parameter initialization", "it_a", "double", context__.to_vec()); | |
748: double it_a(0); | |
749: it_a = vals_r__[pos__++]; | |
750: try { | |
751: writer__.scalar_lub_unconstrain(0, 200, it_a); | |
752: } catch (const std::exception& e) { | |
753: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable it_a: ") + e.what()), current_statement_begin__, prog_reader__()); | |
754: } | |
755: | |
756: current_statement_begin__ = 85; | |
757: if (!(context__.contains_r("it_b"))) | |
758: stan::lang::rethrow_located(std::runtime_error(std::string("Variable it_b missing")), current_statement_begin__, prog_reader__()); | |
759: vals_r__ = context__.vals_r("it_b"); | |
760: pos__ = 0U; | |
761: context__.validate_dims("parameter initialization", "it_b", "double", context__.to_vec()); | |
762: double it_b(0); | |
763: it_b = vals_r__[pos__++]; | |
764: try { | |
765: writer__.scalar_lub_unconstrain(0, 2, it_b); | |
766: } catch (const std::exception& e) { | |
767: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable it_b: ") + e.what()), current_statement_begin__, prog_reader__()); | |
768: } | |
769: | |
770: current_statement_begin__ = 86; | |
771: if (!(context__.contains_r("p_d"))) | |
772: stan::lang::rethrow_located(std::runtime_error(std::string("Variable p_d missing")), current_statement_begin__, prog_reader__()); | |
773: vals_r__ = context__.vals_r("p_d"); | |
774: pos__ = 0U; | |
775: context__.validate_dims("parameter initialization", "p_d", "double", context__.to_vec()); | |
776: double p_d(0); | |
777: p_d = vals_r__[pos__++]; | |
778: try { | |
779: writer__.scalar_lub_unconstrain(0, 1, p_d); | |
780: } catch (const std::exception& e) { | |
781: stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable p_d: ") + e.what()), current_statement_begin__, prog_reader__()); | |
782: } | |
783: | |
784: params_r__ = writer__.data_r(); | |
785: params_i__ = writer__.data_i(); | |
786: } | |
787: | |
788: void transform_inits(const stan::io::var_context& context, | |
789: Eigen::Matrix<double, Eigen::Dynamic, 1>& params_r, | |
790: std::ostream* pstream__) const { | |
791: std::vector<double> params_r_vec; | |
792: std::vector<int> params_i_vec; | |
793: transform_inits(context, params_i_vec, params_r_vec, pstream__); | |
794: params_r.resize(params_r_vec.size()); | |
795: for (int i = 0; i < params_r.size(); ++i) | |
796: params_r(i) = params_r_vec[i]; | |
797: } | |
798: | |
799: | |
    /**
     * Log of the (unnormalized) posterior density for this model.
     *
     * Reads the unconstrained parameter vector params_r__, constrains each
     * scalar to its support (applying the log-Jacobian when jacobian__ is
     * true), then accumulates the model-block log density over the eight
     * observation groups (T_no_T, T_F_to_F, T_T_to_F, T_F_to_T, F_no_F,
     * F_no_N, F_N_to_F, F_N_to_N).
     *
     * @param params_r__ unconstrained real parameters (consumed in order)
     * @param params_i__ integer parameters (unused by this model)
     * @param pstream__  stream for user print statements
     * @return accumulated log probability (templated for autodiff)
     * @throws std::runtime_error rethrown with the Stan-program line number
     *         via rethrow_located on any failure
     */
    template <bool propto__, bool jacobian__, typename T__>
    T__ log_prob(std::vector<T__>& params_r__,
                 std::vector<int>& params_i__,
                 std::ostream* pstream__ = 0) const {

        typedef T__ local_scalar_t__;

        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
        (void) DUMMY_VAR__;  // dummy to suppress unused var warning

        T__ lp__(0.0);
        stan::math::accumulator<T__> lp_accum__;
        try {
            stan::io::reader<local_scalar_t__> in__(params_r__, params_i__);

            // model parameters
            // Nine scalars, each lower-bounded at 0 with upper bounds
            // 200 (shapes), 2 (rates), or 1 (probabilities).
            current_statement_begin__ = 74;
            local_scalar_t__ gt_a;
            (void) gt_a;  // dummy to suppress unused var warning
            if (jacobian__)
                gt_a = in__.scalar_lub_constrain(0, 200, lp__);
            else
                gt_a = in__.scalar_lub_constrain(0, 200);

            current_statement_begin__ = 75;
            local_scalar_t__ gt_b;
            (void) gt_b;  // dummy to suppress unused var warning
            if (jacobian__)
                gt_b = in__.scalar_lub_constrain(0, 2, lp__);
            else
                gt_b = in__.scalar_lub_constrain(0, 2);

            current_statement_begin__ = 76;
            local_scalar_t__ p_e;
            (void) p_e;  // dummy to suppress unused var warning
            if (jacobian__)
                p_e = in__.scalar_lub_constrain(0, 1, lp__);
            else
                p_e = in__.scalar_lub_constrain(0, 1);

            current_statement_begin__ = 79;
            local_scalar_t__ ot_a;
            (void) ot_a;  // dummy to suppress unused var warning
            if (jacobian__)
                ot_a = in__.scalar_lub_constrain(0, 200, lp__);
            else
                ot_a = in__.scalar_lub_constrain(0, 200);

            current_statement_begin__ = 80;
            local_scalar_t__ ot_b;
            (void) ot_b;  // dummy to suppress unused var warning
            if (jacobian__)
                ot_b = in__.scalar_lub_constrain(0, 2, lp__);
            else
                ot_b = in__.scalar_lub_constrain(0, 2);

            current_statement_begin__ = 81;
            local_scalar_t__ p_c;
            (void) p_c;  // dummy to suppress unused var warning
            if (jacobian__)
                p_c = in__.scalar_lub_constrain(0, 1, lp__);
            else
                p_c = in__.scalar_lub_constrain(0, 1);

            current_statement_begin__ = 84;
            local_scalar_t__ it_a;
            (void) it_a;  // dummy to suppress unused var warning
            if (jacobian__)
                it_a = in__.scalar_lub_constrain(0, 200, lp__);
            else
                it_a = in__.scalar_lub_constrain(0, 200);

            current_statement_begin__ = 85;
            local_scalar_t__ it_b;
            (void) it_b;  // dummy to suppress unused var warning
            if (jacobian__)
                it_b = in__.scalar_lub_constrain(0, 2, lp__);
            else
                it_b = in__.scalar_lub_constrain(0, 2);

            current_statement_begin__ = 86;
            local_scalar_t__ p_d;
            (void) p_d;  // dummy to suppress unused var warning
            if (jacobian__)
                p_d = in__.scalar_lub_constrain(0, 1, lp__);
            else
                p_d = in__.scalar_lub_constrain(0, 1);

            // model body
            // NOTE(review): throughout the model block, p_e / p_c are
            // constrained to (0,1) yet are ADDED to log-scale quantities
            // (e.g. gamma_cdf_log(...) + p_e).  Presumably the Stan program
            // treats these as log-probabilities already — confirm against
            // the original .stan source.
            {
            // Scratch locals reused across all loops below.
            current_statement_begin__ = 99;
            local_scalar_t__ y(DUMMY_VAR__);
            (void) y;  // dummy to suppress unused var warning
            stan::math::initialize(y, DUMMY_VAR__);
            stan::math::fill(y, DUMMY_VAR__);

            current_statement_begin__ = 100;
            local_scalar_t__ T(DUMMY_VAR__);
            (void) T;  // dummy to suppress unused var warning
            stan::math::initialize(T, DUMMY_VAR__);
            stan::math::fill(T, DUMMY_VAR__);

            current_statement_begin__ = 101;
            local_scalar_t__ G(DUMMY_VAR__);
            (void) G;  // dummy to suppress unused var warning
            stan::math::initialize(G, DUMMY_VAR__);
            stan::math::fill(G, DUMMY_VAR__);

            current_statement_begin__ = 102;
            local_scalar_t__ O(DUMMY_VAR__);
            (void) O;  // dummy to suppress unused var warning
            stan::math::initialize(O, DUMMY_VAR__);
            stan::math::fill(O, DUMMY_VAR__);

            current_statement_begin__ = 103;
            local_scalar_t__ y_pdf(DUMMY_VAR__);
            (void) y_pdf;  // dummy to suppress unused var warning
            stan::math::initialize(y_pdf, DUMMY_VAR__);
            stan::math::fill(y_pdf, DUMMY_VAR__);

            // Censored (no-response) T trials: survival term log(1 - exp(G)).
            current_statement_begin__ = 117;
            for (int i = 1; i <= N_T_NR_T; ++i) {

                current_statement_begin__ = 118;
                stan::math::assign(T, get_base1(T_no_T, i, 1, "T_no_T", 1));
                current_statement_begin__ = 119;
                stan::math::assign(G, (gamma_cdf_log(T, gt_a, gt_b) + p_e));
                current_statement_begin__ = 120;
                lp_accum__.add(log_diff_exp(0, G));
            }
            // NOTE(review): this loop is bounded by N_T_NR_F but indexes the
            // SAME array T_no_T as the loop above — verify against the Stan
            // source that this is intended and not a copy/paste of the data
            // variable name.
            current_statement_begin__ = 123;
            for (int i = 1; i <= N_T_NR_F; ++i) {

                current_statement_begin__ = 124;
                stan::math::assign(T, get_base1(T_no_T, i, 1, "T_no_T", 1));
                current_statement_begin__ = 125;
                stan::math::assign(G, (gamma_cdf_log(T, gt_a, gt_b) + p_e));
                current_statement_begin__ = 126;
                lp_accum__.add((log_diff_exp(0, G) + wt_prob(T, it_a, it_b, ot_a, ot_b, p_c, p_d, pstream__)));
            }
            // Observed F->F transitions under the gamma(gt_a, gt_b) density.
            current_statement_begin__ = 129;
            for (int i = 1; i <= N_T_F_F; ++i) {

                current_statement_begin__ = 130;
                stan::math::assign(y, get_base1(T_F_to_F, i, 3, "T_F_to_F", 1));
                current_statement_begin__ = 131;
                lp_accum__.add((gamma_log(y, gt_a, gt_b) + p_e));
            }
            // Observed T->F transitions, same density contribution.
            current_statement_begin__ = 134;
            for (int i = 1; i <= N_T_T_F; ++i) {

                current_statement_begin__ = 135;
                stan::math::assign(y, get_base1(T_T_to_F, i, 3, "T_T_to_F", 1));
                current_statement_begin__ = 136;
                lp_accum__.add((gamma_log(y, gt_a, gt_b) + p_e));
            }
            // F->T transitions: mixture of the "other" gamma component and
            // the user-defined yt_prob density, combined by inclusion-exclusion.
            current_statement_begin__ = 139;
            for (int i = 1; i <= N_T_F_T; ++i) {

                current_statement_begin__ = 140;
                stan::math::assign(y, get_base1(T_F_to_T, i, 3, "T_F_to_T", 1));
                current_statement_begin__ = 141;
                stan::math::assign(T, get_base1(T_F_to_T, i, 1, "T_F_to_T", 1));
                current_statement_begin__ = 142;
                stan::math::assign(G, (gamma_cdf_log(T, gt_a, gt_b) + p_e));
                current_statement_begin__ = 143;
                stan::math::assign(y_pdf, log_sum_exp((gamma_log(y, ot_a, ot_b) + p_c), yt_prob(y, it_a, it_b, ot_a, ot_b, T, p_c, p_d, pstream__)));
                current_statement_begin__ = 144;
                lp_accum__.add((log_diff_exp(0, G) + log_diff_exp(log_sum_exp((gamma_log(y, gt_a, gt_b) + p_e), y_pdf), ((gamma_log(y, gt_a, gt_b) + p_e) + y_pdf))));
            }
            // Censored F trials ending in F.
            current_statement_begin__ = 148;
            for (int i = 1; i <= N_F_NR_F; ++i) {

                current_statement_begin__ = 149;
                stan::math::assign(T, get_base1(F_no_F, i, 1, "F_no_F", 1));
                current_statement_begin__ = 150;
                stan::math::assign(G, (gamma_cdf_log(T, gt_a, gt_b) + p_e));
                current_statement_begin__ = 151;
                lp_accum__.add(log_diff_exp(0, G));
            }
            // Censored F trials with no transition: both survival terms plus
            // the mean-scaled non-completion branch.
            current_statement_begin__ = 154;
            for (int i = 1; i <= N_F_NR_N; ++i) {

                current_statement_begin__ = 155;
                stan::math::assign(T, get_base1(F_no_N, i, 1, "F_no_N", 1));
                current_statement_begin__ = 156;
                stan::math::assign(G, (gamma_cdf_log(T, gt_a, gt_b) + p_e));
                current_statement_begin__ = 157;
                stan::math::assign(O, gamma_cdf_log(T, ot_a, ot_b));
                current_statement_begin__ = 159;
                lp_accum__.add((log_diff_exp(0, G) + log_sum_exp(log_diff_exp(0, O), (stan::math::log((ot_a / ot_b)) + log_diff_exp(0, p_c)))));
            }
            // Observed N->F transitions under the gamma(gt_a, gt_b) density.
            current_statement_begin__ = 162;
            for (int i = 1; i <= N_F_N_F; ++i) {

                current_statement_begin__ = 163;
                stan::math::assign(y, get_base1(F_N_to_F, i, 3, "F_N_to_F", 1));
                current_statement_begin__ = 164;
                lp_accum__.add((gamma_log(y, gt_a, gt_b) + p_e));
            }
            // N->N transitions.
            // NOTE(review): G below is computed from T, which still holds the
            // value left by the LAST iteration of the F_no_N loop above — the
            // freshly read y is not used in G.  This looks like a bug in the
            // underlying Stan program (should it be gamma_cdf_log(y, ...)?);
            // verify against the .stan source before relying on this fit.
            current_statement_begin__ = 167;
            for (int i = 1; i <= N_F_N_N; ++i) {

                current_statement_begin__ = 168;
                stan::math::assign(y, get_base1(F_N_to_N, i, 3, "F_N_to_N", 1));
                current_statement_begin__ = 169;
                stan::math::assign(G, (gamma_cdf_log(T, gt_a, gt_b) + p_e));
                current_statement_begin__ = 170;
                lp_accum__.add(((log_sum_exp((gamma_log(y, gt_a, gt_b) + p_e), log_diff_exp(0, G)) + gamma_log(y, ot_a, ot_b)) + p_c));
            }
            }

        } catch (const std::exception& e) {
            stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
            // Next line prevents compiler griping about no return
            throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
        }

        lp_accum__.add(lp__);
        return lp_accum__.sum();

    } // log_prob()
1023: | |
1024: template <bool propto, bool jacobian, typename T_> | |
1025: T_ log_prob(Eigen::Matrix<T_,Eigen::Dynamic,1>& params_r, | |
1026: std::ostream* pstream = 0) const { | |
1027: std::vector<T_> vec_params_r; | |
1028: vec_params_r.reserve(params_r.size()); | |
1029: for (int i = 0; i < params_r.size(); ++i) | |
1030: vec_params_r.push_back(params_r(i)); | |
1031: std::vector<int> vec_params_i; | |
1032: return log_prob<propto,jacobian,T_>(vec_params_r, vec_params_i, pstream); | |
1033: } | |
1034: | |
1035: | |
1036: void get_param_names(std::vector<std::string>& names__) const { | |
1037: names__.resize(0); | |
1038: names__.push_back("gt_a"); | |
1039: names__.push_back("gt_b"); | |
1040: names__.push_back("p_e"); | |
1041: names__.push_back("ot_a"); | |
1042: names__.push_back("ot_b"); | |
1043: names__.push_back("p_c"); | |
1044: names__.push_back("it_a"); | |
1045: names__.push_back("it_b"); | |
1046: names__.push_back("p_d"); | |
1047: } | |
1048: | |
1049: | |
1050: void get_dims(std::vector<std::vector<size_t> >& dimss__) const { | |
1051: dimss__.resize(0); | |
1052: std::vector<size_t> dims__; | |
1053: dims__.resize(0); | |
1054: dimss__.push_back(dims__); | |
1055: dims__.resize(0); | |
1056: dimss__.push_back(dims__); | |
1057: dims__.resize(0); | |
1058: dimss__.push_back(dims__); | |
1059: dims__.resize(0); | |
1060: dimss__.push_back(dims__); | |
1061: dims__.resize(0); | |
1062: dimss__.push_back(dims__); | |
1063: dims__.resize(0); | |
1064: dimss__.push_back(dims__); | |
1065: dims__.resize(0); | |
1066: dimss__.push_back(dims__); | |
1067: dims__.resize(0); | |
1068: dimss__.push_back(dims__); | |
1069: dims__.resize(0); | |
1070: dimss__.push_back(dims__); | |
1071: } | |
1072: | |
1073: template <typename RNG> | |
1074: void write_array(RNG& base_rng__, | |
1075: std::vector<double>& params_r__, | |
1076: std::vector<int>& params_i__, | |
1077: std::vector<double>& vars__, | |
1078: bool include_tparams__ = true, | |
1079: bool include_gqs__ = true, | |
1080: std::ostream* pstream__ = 0) const { | |
1081: typedef double local_scalar_t__; | |
1082: | |
1083: vars__.resize(0); | |
1084: stan::io::reader<local_scalar_t__> in__(params_r__, params_i__); | |
1085: static const char* function__ = "model3bf038e124f5_att_model1_namespace::write_array"; | |
1086: (void) function__; // dummy to suppress unused var warning | |
1087: | |
1088: // read-transform, write parameters | |
1089: double gt_a = in__.scalar_lub_constrain(0, 200); | |
1090: vars__.push_back(gt_a); | |
1091: | |
1092: double gt_b = in__.scalar_lub_constrain(0, 2); | |
1093: vars__.push_back(gt_b); | |
1094: | |
1095: double p_e = in__.scalar_lub_constrain(0, 1); | |
1096: vars__.push_back(p_e); | |
1097: | |
1098: double ot_a = in__.scalar_lub_constrain(0, 200); | |
1099: vars__.push_back(ot_a); | |
1100: | |
1101: double ot_b = in__.scalar_lub_constrain(0, 2); | |
1102: vars__.push_back(ot_b); | |
1103: | |
1104: double p_c = in__.scalar_lub_constrain(0, 1); | |
1105: vars__.push_back(p_c); | |
1106: | |
1107: double it_a = in__.scalar_lub_constrain(0, 200); | |
1108: vars__.push_back(it_a); | |
1109: | |
1110: double it_b = in__.scalar_lub_constrain(0, 2); | |
1111: vars__.push_back(it_b); | |
1112: | |
1113: double p_d = in__.scalar_lub_constrain(0, 1); | |
1114: vars__.push_back(p_d); | |
1115: | |
1116: double lp__ = 0.0; | |
1117: (void) lp__; // dummy to suppress unused var warning | |
1118: stan::math::accumulator<double> lp_accum__; | |
1119: | |
1120: local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
1121: (void) DUMMY_VAR__; // suppress unused var warning | |
1122: | |
1123: if (!include_tparams__ && !include_gqs__) return; | |
1124: | |
1125: try { | |
1126: if (!include_gqs__ && !include_tparams__) return; | |
1127: if (!include_gqs__) return; | |
1128: } catch (const std::exception& e) { | |
1129: stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
1130: // Next line prevents compiler griping about no return | |
1131: throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
1132: } | |
1133: } | |
1134: | |
1135: template <typename RNG> | |
1136: void write_array(RNG& base_rng, | |
1137: Eigen::Matrix<double,Eigen::Dynamic,1>& params_r, | |
1138: Eigen::Matrix<double,Eigen::Dynamic,1>& vars, | |
1139: bool include_tparams = true, | |
1140: bool include_gqs = true, | |
1141: std::ostream* pstream = 0) const { | |
1142: std::vector<double> params_r_vec(params_r.size()); | |
1143: for (int i = 0; i < params_r.size(); ++i) | |
1144: params_r_vec[i] = params_r(i); | |
1145: std::vector<double> vars_vec; | |
1146: std::vector<int> params_i_vec; | |
1147: write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); | |
1148: vars.resize(vars_vec.size()); | |
1149: for (int i = 0; i < vars.size(); ++i) | |
1150: vars(i) = vars_vec[i]; | |
1151: } | |
1152: | |
1153: static std::string model_name() { | |
1154: return "model3bf038e124f5_att_model1"; | |
1155: } | |
1156: | |
1157: | |
1158: void constrained_param_names(std::vector<std::string>& param_names__, | |
1159: bool include_tparams__ = true, | |
1160: bool include_gqs__ = true) const { | |
1161: std::stringstream param_name_stream__; | |
1162: param_name_stream__.str(std::string()); | |
1163: param_name_stream__ << "gt_a"; | |
1164: param_names__.push_back(param_name_stream__.str()); | |
1165: param_name_stream__.str(std::string()); | |
1166: param_name_stream__ << "gt_b"; | |
1167: param_names__.push_back(param_name_stream__.str()); | |
1168: param_name_stream__.str(std::string()); | |
1169: param_name_stream__ << "p_e"; | |
1170: param_names__.push_back(param_name_stream__.str()); | |
1171: param_name_stream__.str(std::string()); | |
1172: param_name_stream__ << "ot_a"; | |
1173: param_names__.push_back(param_name_stream__.str()); | |
1174: param_name_stream__.str(std::string()); | |
1175: param_name_stream__ << "ot_b"; | |
1176: param_names__.push_back(param_name_stream__.str()); | |
1177: param_name_stream__.str(std::string()); | |
1178: param_name_stream__ << "p_c"; | |
1179: param_names__.push_back(param_name_stream__.str()); | |
1180: param_name_stream__.str(std::string()); | |
1181: param_name_stream__ << "it_a"; | |
1182: param_names__.push_back(param_name_stream__.str()); | |
1183: param_name_stream__.str(std::string()); | |
1184: param_name_stream__ << "it_b"; | |
1185: param_names__.push_back(param_name_stream__.str()); | |
1186: param_name_stream__.str(std::string()); | |
1187: param_name_stream__ << "p_d"; | |
1188: param_names__.push_back(param_name_stream__.str()); | |
1189: | |
1190: if (!include_gqs__ && !include_tparams__) return; | |
1191: | |
1192: if (include_tparams__) { | |
1193: } | |
1194: | |
1195: if (!include_gqs__) return; | |
1196: } | |
1197: | |
1198: | |
1199: void unconstrained_param_names(std::vector<std::string>& param_names__, | |
1200: bool include_tparams__ = true, | |
1201: bool include_gqs__ = true) const { | |
1202: std::stringstream param_name_stream__; | |
1203: param_name_stream__.str(std::string()); | |
1204: param_name_stream__ << "gt_a"; | |
1205: param_names__.push_back(param_name_stream__.str()); | |
1206: param_name_stream__.str(std::string()); | |
1207: param_name_stream__ << "gt_b"; | |
1208: param_names__.push_back(param_name_stream__.str()); | |
1209: param_name_stream__.str(std::string()); | |
1210: param_name_stream__ << "p_e"; | |
1211: param_names__.push_back(param_name_stream__.str()); | |
1212: param_name_stream__.str(std::string()); | |
1213: param_name_stream__ << "ot_a"; | |
1214: param_names__.push_back(param_name_stream__.str()); | |
1215: param_name_stream__.str(std::string()); | |
1216: param_name_stream__ << "ot_b"; | |
1217: param_names__.push_back(param_name_stream__.str()); | |
1218: param_name_stream__.str(std::string()); | |
1219: param_name_stream__ << "p_c"; | |
1220: param_names__.push_back(param_name_stream__.str()); | |
1221: param_name_stream__.str(std::string()); | |
1222: param_name_stream__ << "it_a"; | |
1223: param_names__.push_back(param_name_stream__.str()); | |
1224: param_name_stream__.str(std::string()); | |
1225: param_name_stream__ << "it_b"; | |
1226: param_names__.push_back(param_name_stream__.str()); | |
1227: param_name_stream__.str(std::string()); | |
1228: param_name_stream__ << "p_d"; | |
1229: param_names__.push_back(param_name_stream__.str()); | |
1230: | |
1231: if (!include_gqs__ && !include_tparams__) return; | |
1232: | |
1233: if (include_tparams__) { | |
1234: } | |
1235: | |
1236: if (!include_gqs__) return; | |
1237: } | |
1238: | |
1239: }; // model | |
1240: | |
1241: } // namespace | |
1242: | |
// Alias required by rstan's fit machinery: the generated model class under
// the fixed name `stan_model` expected by <rstan/rstaninc.hpp> below.
typedef model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1 stan_model;
1244: | |
1245: #include <rstan/rstaninc.hpp> | |
1246: /** | |
1247: * Define Rcpp Module to expose stan_fit's functions to R. | |
1248: */ | |
1249: RCPP_MODULE(stan_fit4model3bf038e124f5_att_model1_mod){ | |
1250: Rcpp::class_<rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, | |
1251: boost::random::ecuyer1988> >("stan_fit4model3bf038e124f5_att_model1") | |
1252: // .constructor<Rcpp::List>() | |
1253: .constructor<SEXP, SEXP, SEXP>() | |
1254: // .constructor<SEXP, SEXP>() | |
1255: .method("call_sampler", | |
1256: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::call_sampler) | |
1257: .method("param_names", | |
1258: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::param_names) | |
1259: .method("param_names_oi", | |
1260: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::param_names_oi) | |
1261: .method("param_fnames_oi", | |
1262: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::param_fnames_oi) | |
1263: .method("param_dims", | |
1264: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::param_dims) | |
1265: .method("param_dims_oi", | |
1266: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::param_dims_oi) | |
1267: .method("update_param_oi", | |
1268: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::update_param_oi) | |
1269: .method("param_oi_tidx", | |
1270: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::param_oi_tidx) | |
1271: .method("grad_log_prob", | |
1272: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::grad_log_prob) | |
1273: .method("log_prob", | |
1274: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::log_prob) | |
1275: .method("unconstrain_pars", | |
1276: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::unconstrain_pars) | |
1277: .method("constrain_pars", | |
1278: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::constrain_pars) | |
1279: .method("num_pars_unconstrained", | |
1280: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::num_pars_unconstrained) | |
1281: .method("unconstrained_param_names", | |
1282: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::unconstrained_param_names) | |
1283: .method("constrained_param_names", | |
1284: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::constrained_param_names) | |
1285: .method("standalone_gqs", | |
1286: &rstan::stan_fit<model3bf038e124f5_att_model1_namespace::model3bf038e124f5_att_model1, boost::random::ecuyer1988>::standalone_gqs) | |
1287: ; | |
1288: } | |
1289: | |
1290: // declarations | |
// C-linkage declaration of the entry point Rcpp::sourceCpp/inline uses to
// identify this compiled unit (definition follows below).
extern "C" {
    SEXP file3bf06e8c8186( ) ;
}
1294: | |
1295: // definition | |
1296: | |
1297: SEXP file3bf06e8c8186( ){ | |
1298: return Rcpp::wrap("att_model1"); | |
1299: } | |
1300: | |
1301: |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment