@keichi
Last active November 17, 2019 19:21
ADIOS2 multi-block write test: a writer splits a global 1-D array into fixed-size blocks and Puts each block through SetSelection, and a reader reads the blocks back with per-block Get calls. The gist contains CMakeLists.txt, reader.cpp, and writer.cpp.
CMakeLists.txt

cmake_minimum_required(VERSION 3.10)
project(test C CXX)
set(CMAKE_CXX_STANDARD 11)

find_package(MPI REQUIRED)
find_package(ADIOS2 REQUIRED)

# We are not using the C++ bindings of MPI; this stops the compilers from looking for them
add_definitions(-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX)

add_executable(writer writer.cpp)
target_link_libraries(writer adios2::adios2 MPI::MPI_C)

add_executable(reader reader.cpp)
target_link_libraries(reader adios2::adios2 MPI::MPI_C)
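To build and run (a sketch, not part of the gist; the exact launcher invocation is an example): configure and compile with CMake, e.g.

mkdir build && cd build && cmake .. && make

then start writer and reader inside the same MPI job, since the SSC engine couples the two programs through a shared MPI_COMM_WORLD (which the sources split by color), e.g.

mpirun -n 2 ./writer : -n 1 ./reader

The reader must run on exactly one rank, as enforced in reader.cpp.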
reader.cpp

#include <iostream>
#include <chrono>
#include <thread>
#include <vector>

#include <adios2.h>
#include <mpi.h>

const size_t NUM_BLOCKS = 4;
const size_t BLOCK_SIZE = 3;

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size, wrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    // Writer and reader share MPI_COMM_WORLD; split off a reader-only
    // communicator (color 2 marks the reader side)
    const unsigned int color = 2;
    MPI_Comm comm;
    MPI_Comm_split(MPI_COMM_WORLD, color, wrank, &comm);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (size != 1) {
        std::cerr << "Only one reader is allowed" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    adios2::ADIOS adios(comm, adios2::DebugON);
    adios2::IO io = adios.DeclareIO("SimulationOutput");

    // io.SetEngine("BPFile");
    // io.SetEngine("SST");
    // io.SetEngine("InSituMPI");
    // io.SetParameter("verbose", "5");
    io.SetEngine("SSC");

    adios2::Engine reader = io.Open("a.bp", adios2::Mode::Read);

    std::vector<double> u;

    for (int step = 0;; step++) {
        adios2::StepStatus status = reader.BeginStep();
        if (status != adios2::StepStatus::OK) {
            break;
        }

        std::cout << "receiving step " << step << std::endl;

        adios2::Variable<double> var = io.InquireVariable<double>("u");
        u.resize(var.Shape()[0]);

        // Read the global array block by block via bounding-box selections
        for (size_t i = 0; i < var.Shape()[0] / BLOCK_SIZE; i++) {
            var.SetSelection({{i * BLOCK_SIZE}, {BLOCK_SIZE}});
            reader.Get(var, u.data() + i * BLOCK_SIZE);
        }
        // reader.Get(var, u.data());

        // Deferred Gets are executed here
        reader.EndStep();

        std::cout << "Received u is: ";
        for (size_t i = 0; i < u.size(); i++) {
            std::cout << u[i] << " ";
        }
        std::cout << std::endl;

        // The writer fills the global array so that u[i] == i + step
        for (size_t i = 0; i < u.size(); i++) {
            if (u[i] != i + step) {
                std::cerr << "Received data is incorrect" << std::endl;
                break;
            }
        }
    }

    reader.Close();

    MPI_Finalize();
}
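For reference, a hedged sketch (not part of the original gist) of how the reader could discover the writer-side blocks at runtime instead of hard-coding BLOCK_SIZE, using adios2::Engine::BlocksInfo. Whether per-block metadata is exposed this way depends on the engine, so treat it as illustrative; ReadAllBlocks is a hypothetical helper name.

// Sketch: read every block of "u" for the current step without assuming a
// fixed block size. Assumes Engine::BlocksInfo reports one entry per block
// written in this step; everything else mirrors reader.cpp above.
#include <vector>
#include <adios2.h>

std::vector<double> ReadAllBlocks(adios2::IO &io, adios2::Engine &reader)
{
    adios2::Variable<double> var = io.InquireVariable<double>("u");
    std::vector<double> u(var.Shape()[0]);

    // One Info entry per block Put by the writers in the current step
    for (const auto &b : reader.BlocksInfo(var, reader.CurrentStep())) {
        // b.Start / b.Count locate the block inside the global array
        var.SetSelection({b.Start, b.Count});
        reader.Get(var, u.data() + b.Start[0]);
    }

    reader.PerformGets(); // execute the deferred Gets so u is filled
    return u;
}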
writer.cpp

#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

#include <adios2.h>
#include <mpi.h>

const size_t NUM_BLOCKS = 4;
const size_t BLOCK_SIZE = 3;
const size_t LOCAL_ARRAY_SIZE = NUM_BLOCKS * BLOCK_SIZE;

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size, wrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    // Writer and reader share MPI_COMM_WORLD; split off a writer-only
    // communicator (color 1 marks the writer side)
    const unsigned int color = 1;
    MPI_Comm comm;
    MPI_Comm_split(MPI_COMM_WORLD, color, wrank, &comm);

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    adios2::ADIOS adios(comm, adios2::DebugON);
    adios2::IO io = adios.DeclareIO("SimulationOutput");

    // io.SetEngine("BPFile");
    // io.SetEngine("SST");
    // io.SetEngine("InSituMPI");
    // io.SetParameter("verbose", "5");
    // io.SetParameter("RendezvousReaderCount", "0");
    // io.SetParameter("QueueLimit", "2");
    io.SetEngine("SSC");

    // Global 1D array of LOCAL_ARRAY_SIZE * size elements; the start/count
    // given here are placeholders, the real selection is set per block below
    adios2::Variable<double> var = io.DefineVariable<double>(
        "u", {LOCAL_ARRAY_SIZE * size}, {0}, {LOCAL_ARRAY_SIZE});

    adios2::Engine writer = io.Open("a.bp", adios2::Mode::Write);

    // Create a 1D array u with LOCAL_ARRAY_SIZE elements
    std::vector<double> u(LOCAL_ARRAY_SIZE);

    for (int step = 0; step < 10000; step++) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1000));

        // Start time step
        writer.BeginStep();

        // Fill buffer
        for (size_t i = 0; i < LOCAL_ARRAY_SIZE; i++) {
            u[i] = i + step + rank * LOCAL_ARRAY_SIZE;
        }

        std::cout << "writer " << rank << " writing step " << step << std::endl;

        // Divide u into NUM_BLOCKS blocks and send them to the reader
        for (size_t i = 0; i < NUM_BLOCKS; i++) {
            var.SetSelection(
                {{i * BLOCK_SIZE + rank * LOCAL_ARRAY_SIZE}, {BLOCK_SIZE}});
            writer.Put(var, u.data() + i * BLOCK_SIZE);
        }

        // Deferred Puts are collected and published here
        writer.EndStep();
    }

    writer.Close();

    MPI_Finalize();
}
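A note on semantics (standard ADIOS2 behavior, not spelled out in the gist): Put and Get default to deferred mode, so the pointers handed to them are only consumed or filled at EndStep (or PerformPuts/PerformGets). The code respects this: the writer refills u only after the previous EndStep, and the reader prints and checks u only after EndStep.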