Created
April 21, 2013 20:10
-
-
Save anonymous/5430902 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// ParallelSort.c | |
// | |
// Copyright 2011 Armel-Drouot <armel.bourgon-drouot@esial.net> | |
// | |
// This program is free software; you can redistribute it and/or modify | |
// it under the terms of the GNU General Public License as published by | |
// the Free Software Foundation; either version 2 of the License, or | |
// (at your option) any later version. | |
// | |
// This program is distributed in the hope that it will be useful, | |
// but WITHOUT ANY WARRANTY; without even the implied warranty of | |
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
// GNU General Public License for more details. | |
// | |
// You should have received a copy of the GNU General Public License | |
// along with this program; if not, write to the Free Software | |
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, | |
// MA 02110-1301, USA.
/* | |
* This program is an implementation of the parallel odd-even transposition | |
* sort algorithm for distributed memory system. Tested with Open-MPI. | |
 * It creates a random array of N integers and sorts it. It assumes N is a
 * multiple of the number of hosts you launch the program with (e.g. via mpirun).
* | |
* 1 - The Odd-even transposition sort algorithm starts by | |
* distributing n/p sub-lists (p is the number of processors) to | |
* all the processors. | |
* | |
* 2 - Each processor then sequentially sorts its | |
* sub-list locally. | |
* | |
* 3 - The algorithm then operates by alternating | |
* between an odd and an even phase. | |
* | |
* 1 - In the even phase, even numbered processors | |
* (processor i) communicate with the next odd numbered | |
* processors (processor i+1). In this communication process, | |
* the two sub-lists for each 2 communicating processes are | |
* merged together. The upper half of the list is then kept in | |
* the higher number processor and the lower half is put in the | |
* lower number processor. | |
* | |
* 2 - In the odd phase, odd number processors (processor i) | |
* communicate with the previous even number processors (i-1) | |
* in exactly the same way as in the even phase. | |
* | |
*/ | |
#include <stdio.h> | |
#include <stdlib.h> | |
#include <time.h> | |
#include <mpi.h> | |
#include <math.h> | |
#define N 128000 | |
/* qsort comparison callback: ascending order of ints.
 * Uses the (x > y) - (x < y) idiom to avoid the overflow that a plain
 * subtraction (x - y) would cause on large-magnitude values. */
static int compareInts(const void *a, const void *b)
{
	const int x = *(const int *)a;
	const int y = *(const int *)b;
	return (x > y) - (x < y);
}

/* Parameters : arrayToSort, an array of size elements.
 * Sorts the array in ascending order, in place.
 * The original hand-rolled sequential odd-even transposition sort ran in
 * O(n^2); the standard library qsort gives O(n log n) with the same
 * observable result (a sorted array) and the same interface.
 */
void sequentialSort(int *arrayToSort, int size) {
	/* nothing to do for empty or single-element input */
	if (arrayToSort == NULL || size <= 1) {
		return;
	}
	qsort(arrayToSort, (size_t)size, sizeof *arrayToSort, compareInts);
}
/* Parameters : two sorted arrays, each of length size.
 * Merges the two arrays and keeps the lowest size elements in array1;
 * array2 is left untouched.
 * Safety note: exactly size elements are consumed in total, so the two
 * cursors k and l can never run past the end of their input arrays.
 */
void lower(int *array1, int *array2, int size)
{
	/* temporary buffer holding the merged lower half;
	 * renamed from `new` (a C++ keyword, which broke C++ interop) */
	int *merged = malloc(size * sizeof *merged);
	if (merged == NULL) {
		/* abort rather than dereference NULL and corrupt the sort */
		fprintf(stderr, "lower: out of memory\n");
		exit(EXIT_FAILURE);
	}
	int k = 0; /* cursor in array1 */
	int l = 0; /* cursor in array2 */
	int count;
	for (count = 0; count < size; count++) {
		if (array1[k] <= array2[l]) {
			merged[count] = array1[k];
			k++;
		} else {
			merged[count] = array2[l];
			l++;
		}
	}
	/* copy the merged result back into the first array */
	for (count = 0; count < size; count++) {
		array1[count] = merged[count];
	}
	free(merged);
}
/* Parameters : two sorted arrays, each of length size.
 * Merges the two arrays and keeps the highest size elements in array1;
 * array2 is left untouched. Mirror image of lower(): it walks both
 * arrays backwards from their last element.
 * Safety note: exactly size elements are consumed in total, so the two
 * cursors k and l can never underflow below index 0.
 */
void higher(int *array1, int *array2, int size)
{
	/* temporary buffer holding the merged upper half;
	 * renamed from `new` (a C++ keyword, which broke C++ interop) */
	int *merged = malloc(size * sizeof *merged);
	if (merged == NULL) {
		/* abort rather than dereference NULL and corrupt the sort */
		fprintf(stderr, "higher: out of memory\n");
		exit(EXIT_FAILURE);
	}
	int k = size - 1; /* cursor in array1, moving down */
	int l = size - 1; /* cursor in array2, moving down */
	int count;
	for (count = size - 1; count >= 0; count--) {
		if (array1[k] >= array2[l]) {
			merged[count] = array1[k];
			k--;
		} else {
			merged[count] = array2[l];
			l--;
		}
	}
	/* copy the merged result back into the first array */
	for (count = 0; count < size; count++) {
		array1[count] = merged[count];
	}
	free(merged);
}
/* Parameter : | |
* subArray : an integer array | |
* size : the size of the integer | |
* rank : the rank of the host | |
* Send to the next host an array, recieve array from the next host | |
* keep the lower part of the 2 array | |
*/ | |
void exchangeWithNext(int *subArray, int size, int rank) | |
{ | |
MPI_Send(subArray,size,MPI_INT,rank+1,0,MPI_COMM_WORLD); | |
/* recieve data from the next odd numbered host */ | |
int *nextArray = malloc(size*sizeof(int)); | |
MPI_Status stat; | |
MPI_Recv(nextArray,size,MPI_INT,rank+1,0,MPI_COMM_WORLD,&stat); | |
/* Keep the lower half of subArray and nextArray */ | |
lower(subArray,nextArray,size); | |
free(nextArray); | |
} | |
/* Parameter : | |
* subArray : an integer array | |
* size : the size of the integer | |
* rank : the rank of the host | |
* Send to the previous host an array, recieve array from the previous host | |
* keep the higher part of the 2 array | |
*/ | |
void exchangeWithPrevious(int *subArray, int size, int rank) | |
{ | |
/* send our sub-array to the previous host*/ | |
MPI_Send(subArray,size,MPI_INT,rank-1,0,MPI_COMM_WORLD); | |
/* recieve data from the previous host */ | |
int *previousArray = malloc(size*sizeof(int)); | |
MPI_Status stat; | |
MPI_Recv(previousArray,size,MPI_INT,rank-1,0,MPI_COMM_WORLD,&stat); | |
/* Keep the higher half of subArray and previousArray */ | |
higher(subArray,previousArray,size); | |
free(previousArray); | |
} | |
int main(int argc, char **argv) | |
{ | |
//init MPI | |
MPI_Init(&argc,&argv); | |
//get the number of host you have | |
int hostCount; | |
MPI_Comm_size(MPI_COMM_WORLD,&hostCount); | |
//get the rank of the host where this code run | |
int rank; | |
MPI_Comm_rank(MPI_COMM_WORLD,&rank); | |
/* The first step is to randomly build an array | |
* Only one host do that. We call this host the master, | |
* is has the rank 0. | |
*/ | |
// arrayToSort is the array we will sort | |
int *arrayToSort = malloc(N * sizeof(int)); | |
if(rank == 0) { | |
srand(time(NULL)); | |
int i; | |
for(i=0;i<N;i++) { | |
arrayToSort[i] = rand()/100000; | |
} | |
} | |
/* The sorting process start here. | |
* Distribute sub-array to all the hosts. | |
* There are hostCount sub-array of size N/hostCount. | |
*/ | |
/*init a timer on the master*/ | |
double start; | |
if (rank == 0) { | |
start = clock(); | |
} | |
int *subArray = malloc(N/hostCount * sizeof(int)); | |
if(rank == 0) { /* The master send data to everyone */ | |
/* send data, we use MPI_Scatter, prototype here : | |
* http://www.mcs.anl.gov/research/projects/mpi/www/www3/MPI_Scatter.html */ | |
MPI_Scatter(arrayToSort,N/hostCount,MPI_INT,subArray,N/hostCount,MPI_INT,0,MPI_COMM_WORLD); | |
} | |
/* All hosts recieve there sub-array | |
* we use MPI_Scatterv, prototype here : | |
* http://www.cc.gatech.edu/projects/ihpcl/mpichdoc/www3/MPI_Scatterv.html | |
*/ | |
/* Entry i of displs specifies the displacement of i sub-array relative to arrayToSort */ | |
int *displs = malloc(hostCount * sizeof(int)); | |
int i; | |
for (i=0;i<hostCount;i++) { | |
displs[i] = i*(N/hostCount); | |
} | |
/* sendcnts specifies the number of elements to send to each host. | |
* Because we set this number to N/hostCount we assume N is a multiple | |
* of hostCount | |
*/ | |
int *sendcnts = malloc(hostCount * sizeof(int)); | |
for (i=0;i<hostCount;i++) { | |
sendcnts[i]=N/hostCount; | |
} | |
/* reieve data */ | |
MPI_Scatterv(arrayToSort,sendcnts,displs,MPI_INT,subArray,N/hostCount,MPI_INT,0,MPI_COMM_WORLD); | |
free(displs); | |
free(sendcnts); | |
/* Each host sequentially sorts its sub-list */ | |
//subArray = merge_sort(subArray,N/hostCount); | |
sequentialSort(subArray,N/hostCount); | |
/*Then algorithm operates by alternating | |
* between an odd and an even phase. | |
*/ | |
i =0; | |
for(i=0;i<hostCount;i++) { | |
/* even phase */ | |
if (i%2==0) { | |
/* even numbered host */ | |
if(rank%2==0) { | |
/* even numbered host communicate with the next odd numbered host */ | |
/* make sure the next odd exits */ | |
if(rank<hostCount-1) { | |
/* send our sub-array to the next odd numbered host | |
* receive data from the next odd numbered host | |
* Keep the lower half of our array and of the next host array's | |
*/ | |
exchangeWithNext(subArray,N/hostCount,rank); | |
} | |
} else { | |
/* odd numbered host communicate with the previous even numbered host */ | |
/* make sure the previous even exits */ | |
if (rank-1 >=0 ) { | |
/* send our sub-array to the previous even numbered host | |
* receive data from the previous even numbered host | |
* Keep the higher half of our array and of the previous host array's | |
*/ | |
exchangeWithPrevious(subArray,N/hostCount,rank); | |
} | |
} | |
} | |
/* odd phase */ | |
else { | |
/* odd host */ | |
if(rank%2!=0) { | |
/* In the odd phase odd numbered host communicate with the next | |
* even numbered host make sure the next even exits */ | |
if (rank<hostCount-1) { | |
/* send our sub-array to the next even numbered host | |
* receive data from the next even numbered host | |
* Keep the lower half of our array and the next host array's | |
*/ | |
exchangeWithNext(subArray,N/hostCount,rank); | |
} | |
} | |
/* even host */ | |
else { | |
/* In the odd phase even numbered host communicate with the previous | |
* odd numbered host make sure the previous host exits */ | |
if (rank-1 >=0 ) { | |
/* send our sub-array to the previous odd numbered host | |
* receive data from the previous odd numbered host | |
* Keep the higher half of our array and of the previous host array's | |
*/ | |
exchangeWithPrevious(subArray,N/hostCount,rank); | |
} | |
} | |
} | |
} | |
/* gather the data from all host in the master. | |
* We use MPI_Gather, prototype here : | |
* http://www.mcs.anl.gov/research/projects/mpi/www/www3/MPI_Gather.html | |
*/ | |
MPI_Gather(subArray,N/hostCount,MPI_INT,arrayToSort,N/hostCount,MPI_INT,0,MPI_COMM_WORLD); | |
/* The master host display the sorted array */ | |
if(rank == 0) { | |
/* | |
printf("\nSorted array :\n"); | |
for (i=0;i<N;i++) { | |
printf("%d; ",arrayToSort[i]); | |
} | |
*/ | |
printf("\n"); | |
printf("sorted in %f s\n\n",((double)clock() - start) / CLOCKS_PER_SEC); | |
} | |
//dealoc MPI | |
MPI_Finalize(); | |
return 0; | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment