Skip to content

Instantly share code, notes, and snippets.

@tilast
Created June 9, 2015 08:55
Show Gist options
  • Save tilast/192e1a612c4baa15b1ee to your computer and use it in GitHub Desktop.
gatherv/scatterv
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#define N 10
/*
 * Demo of MPI_Scatterv / MPI_Gatherv:
 * the root fills an array of N random floats, scatters variable-size
 * chunks to all ranks, each rank zeroes the elements whose GLOBAL index
 * is odd, and the root gathers the modified chunks back and prints
 * "before -> after" pairs.
 */
int main(int argc, char** argv) {
    srand(time(NULL));

    /* Pass argc/argv so the MPI runtime can strip its own arguments. */
    MPI_Init(&argc, &argv);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    /* This demo needs at least 2 processes. */
    if (world_size < 2) {
        fprintf(stderr, "World size must be greater than 1 for %s\n", argv[0]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Base chunk size; the last rank absorbs the remainder when N is
     * not evenly divisible by world_size. */
    int elements_per_proc = N / world_size;
    int difference = N - elements_per_proc * world_size;

    int *chunk_sizes = malloc(sizeof *chunk_sizes * world_size);
    int *displ = malloc(sizeof *displ * world_size);
    if (!chunk_sizes || !displ) {
        fprintf(stderr, "out of memory\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (int i = 0; i < world_size; ++i) {
        chunk_sizes[i] = elements_per_proc;
        displ[i] = i * elements_per_proc;
    }
    chunk_sizes[world_size - 1] += difference;

    /* Root-only buffers. They MUST be initialized (NULL) on every rank:
     * the original left them indeterminate on non-root ranks, and passing
     * an uninitialized pointer value to a function is undefined behavior,
     * even though MPI ignores these arguments when rank != root. */
    float *rand_nums = NULL, *rand_nums_new = NULL;
    if (world_rank == 0) {
        /* BUG FIX: original used sizeof(double) for float buffers. */
        rand_nums = malloc(sizeof *rand_nums * N);
        rand_nums_new = malloc(sizeof *rand_nums_new * N);
        if (!rand_nums || !rand_nums_new) {
            fprintf(stderr, "out of memory\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        for (int i = 0; i < N; ++i) {
            rand_nums[i] = rand() % 100 - 50; /* integer values in [-50, 49] */
            printf("%f\n", rand_nums[i]);
        }
        printf("\n");
    }

    /* Receive count for this rank (last rank takes the remainder). */
    int current_recv_size =
        elements_per_proc + (world_rank == world_size - 1 ? difference : 0);

    /* Buffer holding this rank's subset of the random numbers. */
    float *sub_rand_nums = malloc(sizeof *sub_rand_nums * current_recv_size);
    if (!sub_rand_nums) {
        fprintf(stderr, "out of memory\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Scatter variable-size chunks of the random numbers to all ranks. */
    MPI_Scatterv(rand_nums, chunk_sizes, displ, MPI_FLOAT, sub_rand_nums,
                 current_recv_size, MPI_FLOAT, 0, MPI_COMM_WORLD);

    /* Zero every element whose global index is odd. The global index of
     * local element i is displ[world_rank] + i, which equals
     * elements_per_proc * world_rank + i for every rank here. */
    for (int i = 0; i < current_recv_size; ++i) {
        if ((elements_per_proc * world_rank + i) % 2 == 1) {
            sub_rand_nums[i] = 0;
        }
    }

    /* Gather the modified chunks back into rand_nums_new on the root. */
    MPI_Gatherv(sub_rand_nums, current_recv_size, MPI_FLOAT, rand_nums_new,
                chunk_sizes, displ, MPI_FLOAT, 0, MPI_COMM_WORLD);

    if (world_rank == 0) {
        for (int i = 0; i < N; ++i) {
            printf("%f -> %f\n", rand_nums[i], rand_nums_new[i]);
        }
        printf("\n");
    }

    /* BUG FIX: original leaked every allocation. free(NULL) is a no-op,
     * so the root-only buffers are safe to free on every rank. */
    free(sub_rand_nums);
    free(rand_nums);
    free(rand_nums_new);
    free(chunk_sizes);
    free(displ);

    MPI_Finalize();
    return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment