@Osmose
Created February 10, 2011 02:36
#include "mpi.h"
#include <stdio.h>
#include <math.h>
int main( int argc, char *argv[])
{
int n, i, k;
double PI25DT = 3.141592653589793238462643;
double pi, h, sum, x, recv_sum;
int numprocs, myid;
double startTime, endTime;
MPI_Status stat;
/* Initialize MPI and get number of processes and my number or rank*/
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
/* Processor zero sets the number of intervals and starts its clock*/
if (myid==0)
{
n=100000000 * numprocs;
startTime=MPI_Wtime();
for (k = 1; k < numprocs; k++) {
MPI_Send(&n, 1, MPI_INT, k, k, MPI_COMM_WORLD);
}
} else {
MPI_Recv(&n, 1, MPI_INT, 0, myid, MPI_COMM_WORLD, &stat);
}
/* Calculate the width of intervals */
h = 1.0 / (double) n;
/* Initialize sum */
sum = 0.0;
/* Step over each inteval I own */
for (i = myid+1; i <= n; i += numprocs)
{
/* Calculate midpoint of interval */
x = h * ((double)i - 0.5);
/* Add rectangle's area = height*width = f(x)*h */
sum += (4.0/(1.0+x*x))*h;
}
/* Get sum total on processor zero */
if (myid == 0) {
pi = sum;
for (k = 1; k < numprocs; k++) {
MPI_Recv(&recv_sum, 1, MPI_DOUBLE, k, k, MPI_COMM_WORLD, &stat);
pi += recv_sum;
}
} else {
MPI_Send(&sum, 1, MPI_DOUBLE, 0, myid, MPI_COMM_WORLD);
}
/* Print approximate value of pi and runtime*/
if (myid==0)
{
printf("pi is approximately %.16f, Error is %e\n",
pi, fabs(pi - PI25DT));
endTime=MPI_Wtime();
printf("runtime is=%.16f",endTime-startTime);
}
MPI_Finalize();
return 0;
}
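
For reference, the loop above is a midpoint-rule approximation of the integral that defines pi (this notation is added here for clarity; it is not part of the original gist):

\pi = \int_0^1 \frac{4}{1+x^2}\, dx
    \approx h \sum_{i=1}^{n} \frac{4}{1+x_i^2},
\qquad x_i = h\left(i - \tfrac{1}{2}\right), \quad h = \frac{1}{n},

where each rank handles the indices i = myid+1, myid+1+numprocs, myid+1+2*numprocs, and so on.
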
[mkelly01@blueshark mod_100m]$ cd ../mod
[mkelly01@blueshark mod]$ cat pcpi.c
#include "mpi.h"
#include <stdio.h>
#include <math.h>
int main( int argc, char *argv[])
{
int n, i, k;
double PI25DT = 3.141592653589793238462643;
double pi, h, sum, x, recv_sum;
int numprocs, myid;
double startTime, endTime;
MPI_Status stat;
/* Initialize MPI and get number of processes and my number or rank*/
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
/* Processor zero sets the number of intervals and starts its clock*/
if (myid==0)
{
n=600000000;
startTime=MPI_Wtime();
for (k = 1; k < numprocs; k++) {
MPI_Send(&n, 1, MPI_INT, k, k, MPI_COMM_WORLD);
}
} else {
MPI_Recv(&n, 1, MPI_INT, 0, myid, MPI_COMM_WORLD, &stat);
}
/* Calculate the width of intervals */
h = 1.0 / (double) n;
/* Initialize sum */
sum = 0.0;
/* Step over each inteval I own */
for (i = myid+1; i <= n; i += numprocs)
{
/* Calculate midpoint of interval */
x = h * ((double)i - 0.5);
/* Add rectangle's area = height*width = f(x)*h */
sum += (4.0/(1.0+x*x))*h;
}
/* Get sum total on processor zero */
if (myid == 0) {
pi = sum;
for (k = 1; k < numprocs; k++) {
MPI_Recv(&recv_sum, 1, MPI_DOUBLE, k, k, MPI_COMM_WORLD, &stat);
pi += recv_sum;
}
} else {
MPI_Send(&sum, 1, MPI_DOUBLE, 0, myid, MPI_COMM_WORLD);
}
/* Print approximate value of pi and runtime*/
if (myid==0)
{
printf("pi is approximately %.16f, Error is %e\n",
pi, fabs(pi - PI25DT));
endTime=MPI_Wtime();
printf("runtime is=%.16f",endTime-startTime);
}
MPI_Finalize();
return 0;
}
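
The two listings are identical except for how n is chosen: the first scales the interval count with the number of processes (100000000 * numprocs), while the second fixes it at 600000000 regardless of process count. As a minimal sketch (not part of the original files, and omitting the timing code), the explicit MPI_Send/MPI_Recv loops could also be expressed with the collectives MPI_Bcast and MPI_Reduce:

/* Sketch only: a collective-based variant of the same computation.
 * MPI_Bcast distributes n and MPI_Reduce sums the partial results,
 * replacing the explicit send/receive loops in the listings above. */
#include "mpi.h"
#include <stdio.h>
#include <math.h>

int main(int argc, char *argv[])
{
    int n = 0, i, numprocs, myid;
    double h, x, sum = 0.0, pi = 0.0;
    double PI25DT = 3.141592653589793238462643;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    if (myid == 0)
        n = 600000000;  /* interval count, as in the second listing */

    /* Broadcast n to every rank (replaces the MPI_Send loop on rank 0) */
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    h = 1.0 / (double) n;
    for (i = myid + 1; i <= n; i += numprocs) {
        x = h * ((double) i - 0.5);        /* midpoint of interval i */
        sum += (4.0 / (1.0 + x * x)) * h;  /* rectangle area f(x)*h */
    }

    /* Sum the partial results onto rank 0 (replaces the MPI_Recv loop) */
    MPI_Reduce(&sum, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (myid == 0)
        printf("pi is approximately %.16f, Error is %e\n", pi, fabs(pi - PI25DT));

    MPI_Finalize();
    return 0;
}

This computes the same sum; the collectives simply let the MPI library choose the communication pattern instead of rank 0 looping over every other rank.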