@jcwright77
Last active August 26, 2019 14:31
CPS-FR job example (cpi.c)
#include "mpi.h"
#include <stdio.h>
#include <math.h>
//Build with
// module load gcc
// module load mpich/ge/gcc/64/3.1
// mpicc -o cpi cpi.c
// mpirun ./cpi
double f( double );
double f( double a )
{
return (4.0 / (1.0 + a*a));
}
int main( int argc, char *argv[])
{
int done = 0, n, myid, numprocs, i;
double PI25DT = 3.141592653589793238462643;
double mypi, pi, h, sum, x;
double startwtime = 0.0, endwtime;
int namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d on %s\n",
myid, processor_name);
n = 0;
while (!done)
{
if (myid == 0)
{
/*
printf("Enter the number of intervals: (0 quits) ");
scanf("%d",&n);
*/
if (n==0) n=1024*numprocs; else n=0;
startwtime = MPI_Wtime();
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (n == 0)
done = 1;
else
{
h = 1.0 / (double) n;
sum = 0.0;
for (i = myid + 1; i <= n; i += numprocs)
{
x = h * ((double)i - 0.5);
sum += f(x);
}
mypi = h * sum;
MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (myid == 0)
{
printf("pi is approximately %.16f, Error is %.16f\n",
pi, fabs(pi - PI25DT));
endwtime = MPI_Wtime();
printf("wall clock time = %f\n",
endwtime-startwtime);
}
}
}
MPI_Finalize();
return 0;
}
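
For reference, the quadrature the loop above implements is the midpoint rule applied to the standard integral representation of pi; each rank sums every numprocs-th term, and MPI_Reduce adds the partial sums on rank 0:

\pi = \int_0^1 \frac{4}{1+x^2}\,dx \;\approx\; h \sum_{i=1}^{n} \frac{4}{1 + \left((i - 0.5)\,h\right)^2}, \qquad h = \frac{1}{n}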
cpi_cpsfr.slurm

#!/bin/bash
# submit with: sbatch cpi_cpsfr.slurm
# sbatch command-line arguments may instead be supplied here as #SBATCH <flag> <value> directives;
# arguments given on the sbatch command line override the values below
# Number of nodes
#SBATCH -N 1
# Number of processor cores (psfc, mit, and emiliob nodes have 32 cores per node; 32 nodes x 32 cores = 1024)
#SBATCH -n 32
# specify how long your job needs. Be HONEST, it affects how long the job may wait for its turn.
#SBATCH --time=0:04:00
# which partition or queue the job runs in
#SBATCH -p sched_mit_psfc_cpsfr
# for the CPS-FR class, we have a reservation
#SBATCH --reservation=psfc-cpsfr
#customize the name of the stderr/stdout file. %j is the job number
#SBATCH -o cpi-%j.out
#load default system modules
. /etc/profile.d/modules.sh
#load modules your job depends on.
#better here than in your $HOME/.bashrc to make debugging and requirements easier to track.
#here we are using gcc with the MPICH MPI library
module load mpich/ge/gcc/64/3.1
#It is useful to echo the running environment
env
#Finally, the command to execute.
#The job starts in the directory it was submitted from.
#Note that mpirun learns from SLURM how many processors we have.
#In this case, we use all of the allocated tasks.
mpirun ./cpi
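
A typical session to build, submit, and check on the job might look like the sketch below; the job ID 123456 is made up for illustration, and the output file name follows the -o cpi-%j.out pattern set above.

module load gcc
module load mpich/ge/gcc/64/3.1
mpicc -o cpi cpi.c
sbatch cpi_cpsfr.slurm      # prints something like "Submitted batch job 123456"
squeue -u $USER             # check whether the job is pending or running
cat cpi-123456.out          # per-job output file named by the -o flag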

Slurm script for CPS-FR 2019 class.

@jcwright77 (Author)
Pi test for CPS-FR 2019
