@stephenjbarr
Created September 7, 2012 20:06
MPI-parallelized negative log-likelihood (nll_mpi) evaluation over Eigen matrices, plus a driver that scans the NLL along each parameter.
/////////////////////
// THE MPI SECTION //
/////////////////////

MPI::Init();

int rank = MPI::COMM_WORLD.Get_rank();
int size = MPI::COMM_WORLD.Get_size();

// Warm-up evaluation at the starting point.
// Next, use GSL to optimize over this.
nll_mpi(start_vector, ua, ub, Xmat, Ymat, TASK_PARAM, false);

// Scan the NLL along each coordinate of start_vector, first in the positive
// and then in the negative direction, in steps of INCREMENT. nll_mpi is a
// collective call, so every rank evaluates it; only rank 0's return value
// holds the reduced total, so only rank 0 prints.
VectorXd test_vector = start_vector;
double INCREMENT = 0.1;
double my_result;

for (int vec_idx = 0; vec_idx < SP.N_params; vec_idx++) {
    // POSITIVE
    if (rank == 0) {
        cout << "========================================" << endl;
        cout << "MODIFYING start_vector POSITION " << vec_idx << " POSITIVE" << endl;
    }
    for (int i = 0; i < 100; ++i) {
        test_vector(vec_idx) = start_vector(vec_idx) + (double(i) * INCREMENT);
        my_result = nll_mpi(test_vector, ua, ub, Xmat, Ymat, TASK_PARAM, false);
        if (rank == 0) {
            cout << vec_idx << "," << i << ": " << test_vector.transpose()
                 << " : " << my_result << endl;
        }
    }

    // NEGATIVE
    if (rank == 0) {
        cout << "========================================" << endl;
        cout << "MODIFYING start_vector POSITION " << vec_idx << " NEGATIVE" << endl;
    }
    for (int i = 0; i < 100; ++i) {
        test_vector(vec_idx) = start_vector(vec_idx) - (double(i) * INCREMENT);
        my_result = nll_mpi(test_vector, ua, ub, Xmat, Ymat, TASK_PARAM, false);
        if (rank == 0) {
            cout << vec_idx << "," << i << ": " << test_vector.transpose()
                 << " : " << my_result << endl;
        }
    }

    // Restore this coordinate before scanning the next one.
    test_vector(vec_idx) = start_vector(vec_idx);
}

MPI::Finalize();
// END MPI SECTION
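
// ----------------------------------------------------------------------
// The comment above says the next step is to optimize this NLL with GSL.
// Below is a minimal sketch of that step, assuming GSL's derivative-free
// Nelder-Mead minimizer (gsl_multimin_fminimizer_nmsimplex2). The glue
// names NllData, gsl_objective, and minimize_nll are hypothetical, the
// declarations of nll_mpi and task_param from this file are assumed
// visible, and keeping the worker ranks inside the collective nll_mpi
// call while rank 0 iterates (e.g. by broadcasting theta each iteration)
// is assumed to be handled elsewhere.
// ----------------------------------------------------------------------
#include <gsl/gsl_errno.h>
#include <gsl/gsl_multimin.h>
#include <gsl/gsl_vector.h>

// Fixed arguments that nll_mpi needs besides theta.
struct NllData {
    const MatrixXd *ua, *ub, *Xmat, *Ymat;
    task_param TP;
};

// Adapter: copy GSL's gsl_vector into an Eigen VectorXd and evaluate
// the (collective) MPI objective.
static double gsl_objective(const gsl_vector * v, void * params) {
    NllData * d = static_cast<NllData *>(params);
    VectorXd theta(v->size);
    for (size_t i = 0; i < v->size; ++i) theta(i) = gsl_vector_get(v, i);
    return nll_mpi(theta, *d->ua, *d->ub, *d->Xmat, *d->Ymat, d->TP, false);
}

// Rank-0 driver: Nelder-Mead starting from start_vector.
VectorXd minimize_nll(const VectorXd & start_vector, NllData & data) {
    const size_t np = start_vector.size();

    gsl_multimin_function F;
    F.n      = np;
    F.f      = &gsl_objective;
    F.params = &data;

    gsl_vector * x    = gsl_vector_alloc(np);   // starting point
    gsl_vector * step = gsl_vector_alloc(np);   // initial simplex step sizes
    for (size_t i = 0; i < np; ++i) gsl_vector_set(x, i, start_vector(i));
    gsl_vector_set_all(step, 0.1);

    gsl_multimin_fminimizer * s =
        gsl_multimin_fminimizer_alloc(gsl_multimin_fminimizer_nmsimplex2, np);
    gsl_multimin_fminimizer_set(s, &F, x, step);

    int status = GSL_CONTINUE;
    for (int iter = 0; iter < 500 && status == GSL_CONTINUE; ++iter) {
        if (gsl_multimin_fminimizer_iterate(s)) break;
        status = gsl_multimin_test_size(gsl_multimin_fminimizer_size(s), 1e-4);
    }

    VectorXd best(np);
    for (size_t i = 0; i < np; ++i) best(i) = gsl_vector_get(s->x, i);

    gsl_multimin_fminimizer_free(s);
    gsl_vector_free(x);
    gsl_vector_free(step);
    return best;
}
// ----------------------------------------------------------------------
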
// Distributed negative log-likelihood: rank 0 hands each worker a contiguous
// block of rows, each worker sums nll_singleton over its rows, and the
// per-worker subtotals are combined with MPI_Reduce. Only rank 0's return
// value holds the reduced total.
double nll_mpi( const VectorXd& theta,
                const MatrixXd& ua,
                const MatrixXd& ub,
                const MatrixXd& Xmat,
                const MatrixXd& Ymat,
                task_param TP,
                bool OUTPUT=false )
{
    const int root = 0;
    int n  = ua.rows();
    int nr = ua.cols();   // (unused here)
    int rank = MPI::COMM_WORLD.Get_rank();
    int size = MPI::COMM_WORLD.Get_size();
    int remaining_tasks = TP.N_TASKS;

    int row_ary[2];                       // [START_ROW, END_ROW] assignment
    int * row_ary_ptr = row_ary;
    int START_ROW = TP.START_ROW;
    int END_ROW   = TP.END_ROW;
    int Nslaves   = size - 1;
    int cur_task_size;

    double TOTAL_NLL_SUM[1] = { 0.0 };    // reduction target (valid on root)
    double * recv_buffer = TOTAL_NLL_SUM;
    double NLL_SUBTOTAL[1] = { 0.0 };     // per-rank contribution (0 on root)
    double * send_buffer = NLL_SUBTOTAL;

    if (rank == 0) {
        // SUBDIVIDE THE MATRICES INTO ROWS: each worker gets n/Nslaves rows,
        // and the last worker absorbs whatever tasks remain.
        for (int ii = 1; ii <= Nslaves; ++ii) {
            cur_task_size = (remaining_tasks > (2 * (n / Nslaves)))
                                ? n / Nslaves
                                : remaining_tasks;
            row_ary[START_ROW] = (ii - 1) * (n / Nslaves);
            row_ary[END_ROW]   = (ii - 1) * (n / Nslaves) + cur_task_size - 1;
            remaining_tasks   -= cur_task_size;

            // Send rank ii its start and end row.
            MPI_Send(row_ary_ptr, 2, MPI_INT, ii, 0, MPI::COMM_WORLD);
        }
    } else {
        // RECEIVE ROW ASSIGNMENT, CALCULATE NLL FOR ROW GROUP
        MPI_Recv(row_ary_ptr, 2, MPI_INT, root, 0, MPI::COMM_WORLD,
                 MPI_STATUS_IGNORE);
        cur_task_size = row_ary_ptr[END_ROW] - row_ary_ptr[START_ROW] + 1;
        if (OUTPUT) {
            cout << "Workstation " << rank << " gets " << cur_task_size;
            cout << "  Firms: " << row_ary_ptr[START_ROW]
                 << " to " << row_ary_ptr[END_ROW] << endl;
        }

        // FOR EACH ROW, CALCULATE THE NLL
        double total_sub_nll = 0.0;
        double ll = 0.0;
        for (int rii = row_ary_ptr[START_ROW]; rii <= row_ary_ptr[END_ROW]; ++rii) {
            ll = nll_singleton(rii, theta, ua, ub, Xmat, Ymat);
            total_sub_nll += ll;
            if (rii < 0) {   // debug dump; never triggers for valid row indices
                cout << "========================================" << endl;
                cout << "rii: "  << rii << endl;
                cout << "ua: "   << ua.row(rii) << endl;
                cout << "ub: "   << ub.row(rii) << endl;
                cout << "Xmat: " << Xmat.row(rii) << endl;
                cout << "Ymat: " << Ymat.row(rii) << endl;
            }
            // cout << "ll: " << ll << endl;
        }
        total_sub_nll *= -1.0;   // negate the summed log-likelihood

        if (OUTPUT) {
            cout << "Workstation " << rank << " calcs " << total_sub_nll << endl;
        }
        send_buffer[0] = total_sub_nll;
    }

    // Sum the per-worker subtotals onto the root.
    MPI_Reduce(send_buffer, recv_buffer, 1, MPI_DOUBLE,
               MPI_SUM, root, MPI::COMM_WORLD);

    // if(rank == 0) {
    //     cout << "TOTAL NLL: " << recv_buffer[0] << endl;
    // }

    MPI_Barrier(MPI::COMM_WORLD);

    return recv_buffer[0];
}
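
// ----------------------------------------------------------------------
// The row-assignment arithmetic in the rank-0 branch above can be checked
// without launching MPI. This standalone sketch reproduces the same
// chunking rule (each worker gets n/Nslaves rows; the last worker absorbs
// the remainder). assign_rows and the example sizes are made up here, and
// N_TASKS is assumed to equal the number of rows n.
// ----------------------------------------------------------------------
#include <iostream>
#include <utility>
#include <vector>

std::vector< std::pair<int,int> > assign_rows(int n, int Nslaves) {
    std::vector< std::pair<int,int> > ranges;
    int remaining_tasks = n;
    for (int ii = 1; ii <= Nslaves; ++ii) {
        // Same ternary as in nll_mpi: a full chunk unless fewer than two
        // chunks of work remain, in which case take everything left.
        int cur_task_size = (remaining_tasks > 2 * (n / Nslaves))
                                ? n / Nslaves
                                : remaining_tasks;
        int start = (ii - 1) * (n / Nslaves);
        ranges.push_back(std::make_pair(start, start + cur_task_size - 1));
        remaining_tasks -= cur_task_size;
    }
    return ranges;
}

int main() {
    // Example: 10 rows split across 3 workers -> [0,2], [3,5], [6,9].
    std::vector< std::pair<int,int> > r = assign_rows(10, 3);
    for (size_t i = 0; i < r.size(); ++i)
        std::cout << "worker " << (i + 1) << ": rows "
                  << r[i].first << " .. " << r[i].second << std::endl;
    return 0;
}
// ----------------------------------------------------------------------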