
Example 2: MPMD Example

In this example, the master in the PVM version sends all of the slave tids to the slaves, which search this array to determine their logical ordering among themselves. The slaves in the MPI version instead derive their logical ordering from their own task ID (rank) and the master's task ID, which they learn from the source of the initial message. This is just one of many schemes by which the ordering could be implemented.
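
For quick comparison, here are the two ordering rules, distilled from the listings below (variable names match the code):

    /* PVM: search the tids array broadcast by the master */
    for(i=0; i<nproc; i++)
       if(mytid == tids[i])
          { me = i; break; }

    /* MPI: ranks above the master's shift down by one */
    me = (mytid > master) ? mytid - 1 : mytid;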

Also, instead of the packing and unpacking used in the MPI version, MPI derived datatypes could have been used; a sketch of that alternative follows.
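
Here is a minimal sketch of the derived-datatype alternative, assuming the fixed message layout of this example (an int count followed by 100 floats). The struct and the function name are illustrative, not part of the original program; the calls shown use the MPI-2 names (MPI-1 spelled them MPI_Address and MPI_Type_struct).

    #include <mpi.h>
    struct initmsg { int n; float data[100]; };

    /* Build a datatype matching struct initmsg, so the whole message
       can be sent in one MPI_Send instead of MPI_Pack/MPI_Unpack */
    MPI_Datatype build_initmsg_type(void)
    {
        struct initmsg msg;
        int          blocklens[2] = { 1, 100 };
        MPI_Datatype types[2]     = { MPI_INT, MPI_FLOAT };
        MPI_Aint     displs[2], base;
        MPI_Datatype newtype;
        MPI_Get_address(&msg,      &base);
        MPI_Get_address(&msg.n,    &displs[0]);
        MPI_Get_address(&msg.data, &displs[1]);
        displs[0] -= base;
        displs[1] -= base;
        MPI_Type_create_struct(2, blocklens, displs, types, &newtype);
        MPI_Type_commit(&newtype);
        return newtype;
    }

The master would then send with MPI_Send(&msg, 1, type, i, msgtype, MPI_COMM_WORLD) and each slave would receive with a matching MPI_Recv, with no position bookkeeping on either side.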


MPMD in PVM Version--Master Task

#include <stdio.h>
#include <stdlib.h>      /* for exit() */
#include "pvm3.h"
#define SLAVENAME "slave1"
int main(void)
{
    int mytid;               /* my task id */
    int tids[32];            /* slave task ids */
    int n, nproc, numt, i, who, msgtype, nhost, narch;
    float data[100], result[32];
    struct pvmhostinfo *hostp[32];
    /* enroll in pvm */
    mytid = pvm_mytid();
    /* Set number of slaves to start */
    /* Can not do stdin from spawned task */
    if(pvm_parent() == PvmNoParent){
       puts("How many slave programs (1-32)?");
       scanf("%d", &nproc);
    }
    else{
       pvm_config(&nhost, &narch, hostp);
       nproc = nhost;
       if(nproc > 32) nproc = 32 ;
    }
    /* start up slave tasks */
    numt=pvm_spawn(SLAVENAME, (char**)0, 0, "", nproc, tids);
    if( numt < nproc ){
       printf("Trouble spawning slaves. Aborting. ");
       printf("Error codes are:\n");
       for( i=numt ; i<nproc ; i++ ) {
          printf("TID %d %d\n",i,tids[i]);
       }
       for( i=0 ; i<numt ; i++ ){
          pvm_kill(tids[i]);
       }
       pvm_exit();
exit(1);
    }
    /* Begin User Program */
    n = 100;
    /* initialize_data( data, n ); */
    for( i=0 ; i<n ; i++ ){
       data[i] = 1;
    }
    /* Broadcast initial data to slave tasks */
    pvm_initsend(PvmDataDefault);
    pvm_pkint(&nproc, 1, 1);
    pvm_pkint(tids, nproc, 1);
    pvm_pkint(&n, 1, 1);
    pvm_pkfloat(data, n, 1);
    pvm_mcast(tids, nproc, 0);
    /* Wait for results from slaves */
    msgtype = 5;
    for( i=0 ; i<nproc ; i++ ){
       pvm_recv(-1, msgtype);
       pvm_upkint(&who, 1, 1);
       pvm_upkfloat(&result[who], 1, 1);
       printf("I got %f from %d\n",result[who],who);
    }
    /* Program Finished. Exit PVM before stopping */
    pvm_exit();
    return 0;
}

MPMD in PVM Version--Slave Task

#include <stdio.h>
#include "pvm3.h"
float work(int me, int n, float *data, int *tids, int nproc );
int main(void)
{
    int mytid;       /* my task id */
    int tids[32];    /* task ids   */
    int n, me, i, nproc, master, msgtype;
    float data[100], result;
    /* enroll in pvm */
    mytid = pvm_mytid();
    /* Receive data from master */
    msgtype = 0;
    pvm_recv(-1, msgtype);
    pvm_upkint(&nproc, 1, 1);
    pvm_upkint(tids, nproc, 1);
    pvm_upkint(&n, 1, 1);
    pvm_upkfloat(data, n, 1);
    /* Determine which slave I am (0 -- nproc-1) */
    for(i=0; i<nproc ; i++)
       if(mytid == tids[i])
          { me = i; break; }
    /* Do calculations with data */
    result = work(me, n, data, tids, nproc);
    /* Send result to master */
    pvm_initsend(PvmDataDefault);
    pvm_pkint(&me, 1, 1);
    pvm_pkfloat(&result, 1, 1);
    msgtype = 5;
    master = pvm_parent();
    pvm_send(master, msgtype);
    /* Program finished. Exit PVM before stopping */
    pvm_exit();
    return 0;
}
float
work(int me, int n, float *data, int *tids, int nproc )
/*Simple ex: slaves exchange data with left neighbor*/
{
    int i, dest;
    float psum = 0.0;
    float sum = 0.0;
    for(i=0 ; i<n ; i++){
       sum += me * data[i];
    }
    /*illustrate node-to-node communication*/
    pvm_initsend(PvmDataDefault);
    pvm_pkfloat(&sum, 1, 1);
    dest = me+1;
    if(dest == nproc) dest = 0;
    pvm_send(tids[dest], 22);
    pvm_recv(-1, 22);
    pvm_upkfloat(&psum, 1, 1);
    return(sum+psum);
}

MPMD in MPI Version--Master Task

#include <stdio.h>
#include <mpi.h>
int main(int argc, char *argv[])
{
    int mytid;                  /* my task id */
    int n, nproc, ntasks, i, who, msgtype;
    float data[100], result[32];
    char sbuff[1000], rbuff[1000];
    int position;
    MPI_Status status;
    /* Initialize MPI */
    MPI_Init(&argc, &argv);
    /* Get our task id (our rank in the basic group) */
    MPI_Comm_rank(MPI_COMM_WORLD, &mytid);
    /* Get the number of MPI tasks and slaves */
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    nproc = ntasks - 1;
    if(mytid == 0)
       printf("mytid = %d, ntasks = %d\n", mytid, ntasks);
    /* Begin User Program */
    n = 100;
    /* initialize_data( data, n ); */
    for( i=0 ; i<n ; i++ ){
       data[i] = 1;
    }
    /* Pack initial data to be sent to slave tasks */
    position = 0;
    MPI_Pack(&n, 1, MPI_INT, sbuff, 1000, &position,
             MPI_COMM_WORLD);
    MPI_Pack(data, 100, MPI_FLOAT, sbuff, 1000, &position,
             MPI_COMM_WORLD);
    /* Send initial data to slave tasks */
    msgtype = 0;
    for(i=0; i<ntasks; i++){
       if(i != mytid){
          MPI_Send(sbuff, position, MPI_PACKED, i, msgtype,
                   MPI_COMM_WORLD);
       }
    }
    /* Wait for results from slaves */
    msgtype = 5;
    for( i=0 ; i<nproc ; i++ ){
       MPI_Recv(rbuff, 1000, MPI_PACKED, MPI_ANY_SOURCE,
                msgtype, MPI_COMM_WORLD, &status);
       position = 0;
       MPI_Unpack(rbuff, 1000, &position, &who, 1, MPI_INT,
                  MPI_COMM_WORLD);
       MPI_Unpack(rbuff, 1000, &position, &result[who], 1,
                  MPI_FLOAT, MPI_COMM_WORLD);
       printf("I got %f from %d\n",result[who],who);
    }
    /* Program Finished. Exit MPI before stopping */
    MPI_Finalize();
    return 0;
}

MPMD in MPI Version--Slave Task

Note the use of a buffered send in the slave task of the MPI version. Every slave sends to its neighbor before posting its receive, so using standard sends instead would lead to deadlock under MPI implementations that do not buffer standard-mode sends. (The PVM version has no such concern: pvm_send is asynchronous, and the message is buffered by the PVM system.)
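
An alternative sketch, not used in the listing below: MPI_Sendrecv performs the same ring exchange with no attached buffer, and cannot deadlock no matter how the implementation handles standard sends.

    MPI_Sendrecv(&sum,  1, MPI_FLOAT, dest,           22,
                 &psum, 1, MPI_FLOAT, MPI_ANY_SOURCE, 22,
                 MPI_COMM_WORLD, &status);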

#include <stdio.h>
#include <mpi.h>
float work(int mytid, int me, int n, float *data, int ntasks, int master);
int main(int argc, char *argv[])
{
    int mytid;       /* my task id */
    int me;          /* logical ordering among slaves. */
    int n, i, ntasks, master, msgtype;
    float data[100], result;
    char rbuff[1000], sbuff[1000];
    int position;
    MPI_Status status;
    /* Initialize MPI */
    MPI_Init(&argc, &argv);
    /* Get our task id (our rank in the basic group) */
    MPI_Comm_rank(MPI_COMM_WORLD, &mytid);
    /* Get the number of MPI tasks */
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    /* Receive initial data from master. */
    msgtype = 0;
    MPI_Recv(rbuff, 1000, MPI_PACKED, MPI_ANY_SOURCE,
             msgtype, MPI_COMM_WORLD, &status);
    /* Find out master's task id. */
    master = status.MPI_SOURCE;
    /* Unpack data. */
    position = 0;
    MPI_Unpack(rbuff, 1000, &position, &n, 1, MPI_INT,
               MPI_COMM_WORLD);
    MPI_Unpack(rbuff, 1000, &position, data, n, MPI_FLOAT,
               MPI_COMM_WORLD);
    /* Determine which slave I am (value of me) */
    /* If mytid < master, me = mytid */
    /* Else me=mytid-1 */
    if(mytid > master)
       me = mytid-1;
    else
       me = mytid;
    /* Do calculations with data */
    result = work(mytid, me, n, data, ntasks, master);
    /* Pack result */
    position = 0;
    MPI_Pack(&me, 1, MPI_INT, sbuff, 1000, &position,
             MPI_COMM_WORLD);
    MPI_Pack(&result, 1, MPI_FLOAT, sbuff, 1000, &position,
             MPI_COMM_WORLD);
    /* Send result to master */
    msgtype = 5;
    MPI_Send(sbuff, position, MPI_PACKED, master, msgtype,
             MPI_COMM_WORLD);
    /* Program finished. Exit from MPI */
    MPI_Finalize();
    return 0;
}
float
work(int mytid, int me, int n, float *data, int ntasks, int master)
    /* Simple example: slaves exchange data with left
       neighbor (wrapping) */
{
    int i, dest, bsize;
    void *bptr;
    MPI_Status status;
    float psum = 0.0;
    float sum = 0.0;
    /* room for one float plus the bsend bookkeeping overhead */
    char outbuff[MPI_BSEND_OVERHEAD + sizeof(float)];
    for(i=0 ; i<n ; i++){
       sum += me * data[i];
    }
    /* illustrate node-to-node communication */
    dest = mytid+1;
    if(dest == ntasks)
       dest = 0;
    if(dest == master){      /* skip the master in the ring */
       dest++;
       if(dest == ntasks)    /* wrap again if master was the last rank */
          dest = 0;
    }
    MPI_Buffer_attach(outbuff, sizeof(outbuff));
    MPI_Bsend(&sum, 1, MPI_FLOAT, dest, 22, MPI_COMM_WORLD);
    MPI_Recv(&psum, 1, MPI_FLOAT, MPI_ANY_SOURCE, 22,
             MPI_COMM_WORLD, &status);
    /* detach blocks until the buffered message has been delivered */
    MPI_Buffer_detach(&bptr, &bsize);
    return(sum+psum);
}
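
For reference, the two versions are also launched differently: the PVM master is started directly once the PVM daemon is running and spawns the slaves itself with pvm_spawn, while the MPI master and slaves are started together as one MPMD job. With an mpirun that supports the colon-separated MPMD syntax (an assumption about your implementation; MPICH and Open MPI both accept it, and the binary names here are illustrative), one master and four slaves could be launched as:

    mpirun -np 1 master : -np 4 slave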

