[mpich-discuss] Problems with mpi spawn multiple
fernando_luz
fernando_luz at tpn.usp.br
Tue Nov  4 11:20:22 CST 2008
Yes,
This code is based on the MPI_Comm_spawn example from the MPI Forum homepage
(http://www.mpi-forum.org/docs/mpi-20-html/node98.htm#Node98).
I adapted the code to C++ and to use MPI spawn multiple. The code works
when I use plain MPI spawn.
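For comparison, this is roughly the single-command Spawn version that works for
me, trimmed down to just the spawn call (the Info settings are the same ones
used in the full code below):

#include <mpi.h>

int main(int argc, char *argv[]){
  MPI::Init(argc, argv);

  MPI::Info info = MPI::Info::Create();
  info.Set("wdir", "/home/fernando_luz/");
  info.Set("path", "/home/fernando_luz/SVN/TPN3/casos_testes/02/worker_02/");
  info.Set("host", "a54");

  // Spawn two copies of worker_02, with rank 0 as the root of the spawn.
  MPI::Intercomm everyone =
      MPI::COMM_WORLD.Spawn("worker_02", MPI::ARGV_NULL, 2, info, 0);

  info.Free();
  MPI::Finalize();
  return 0;
}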
-------------------------------------------------------------------------------
manager_02 code:
#include <mpi.h>
#include <cstring>
#include <iostream>
#include <string>
#define MAX 128
MPI::Status status;
MPI::Intercomm everyone;
MPI::Info information[10];
int  size, rank, universe_size, *universe_sizep, n, n_proc[10], size_machine;
bool flag;
	
char machine[MAX], program_name[4][MAX], buffer[MAX], s_wdir[MAX], s_path[MAX], temp[MAX];
int main(int argc, char *argv[]){ 
	
  MPI::Init(argc, argv);
  size = MPI::COMM_WORLD.Get_size();
  rank = MPI::COMM_WORLD.Get_rank();
  
  MPI::Get_processor_name(machine,size_machine);
  
  if (size != 1)  std::cerr << "Top heavy with management" << std::endl; 
  
  flag = MPI::COMM_WORLD.Get_attr(MPI::UNIVERSE_SIZE, &universe_sizep);
  
  if (!flag) {
    std::cout << "This MPI does not support UNIVERSE_SIZE. How many\nprocesses total?" << std::endl;
    std::cin >> universe_size;
  }
  else universe_size = *universe_sizep;
  
  if (universe_size == 1) std::cerr << "There is no way to start a worker process with universe_size equal to 1." << std::endl;
  
  universe_size = 3;
  for (int i = 0; i < universe_size-1; i++){
    strcpy(program_name[i], "worker_02"); 
    information[i] = MPI::Info::Create();
    information[i].Set("wdir", "/home/fernando_luz/");
    information[i].Set("path", "/home/fernando_luz/SVN/TPN3/casos_testes/02/worker_02/");
    information[i].Set("host", "a54");
    n_proc[i] = 1;
  }
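  // Note: as far as I understand, Spawn_multiple expects an array of command
  // strings (const char* commands[count]); I am not sure the cast from
  // char program_name[4][MAX] below is doing the right thing.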
  everyone = MPI::COMM_WORLD.Spawn_multiple(universe_size-1, (const char**) program_name,
                                            MPI::ARGVS_NULL, n_proc, information, rank);
  strcpy(buffer,"Who are you?");
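  // As far as I understand, the broadcasting side of an intercommunicator
  // collective should pass MPI::ROOT (and MPI::PROC_NULL on any other local
  // ranks) rather than its own rank, so the root argument below may be wrong.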
  everyone.Bcast(&buffer, MAX, MPI::CHAR, rank);  
  
  for (int i=0; i < universe_size-1; i++){
    everyone.Recv(&buffer, MAX, MPI::CHAR, i, MPI::ANY_TAG, status);
    std::cout << "I am " << buffer << " with rank " << status.Get_source() << std::endl;
  }
  
  for (int i = 0; i < universe_size-1; i++){
    information[i].Free();
  }
  MPI::Finalize(); 
  return 0; 
} 
--------------------------------------------------------------------------------------------
============================================================================================
worker_02 code:
#include <mpi.h>
#include <iostream>
#include <string>
#define MAX 128
MPI::Status status;
MPI::Intercomm parent;
int  rank, size, size_machine;
char machine[MAX], buffer[MAX];
int main(int argc, char *argv[]){
  MPI::Init(argc, argv);
  parent = MPI::Comm::Get_parent();  // intercommunicator to the spawning job
  
  MPI::Get_processor_name(machine,size_machine);
    
  if (parent == MPI::COMM_NULL) std::cerr << "No daddy!!" << std::endl;
  size = parent.Get_remote_size();
  if (size != 1) std::cerr << "Problems with daddy..." << std::endl;
  rank = parent.Get_rank();
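  // Root 0 here refers to rank 0 of the remote (parent) group of the
  // intercommunicator, i.e. the manager.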
  parent.Bcast(&buffer, MAX, MPI::CHAR, 0);
	
  std::cout << "Process " << rank << " received in broadcast: " << buffer << std::endl;
  parent.Send(&machine, MAX, MPI::CHAR, 0, rank);
  MPI::Finalize(); 
  return 0; 
}
==============================================================================================
and I start the program with "mpiexec -configfile executa.txt" 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
executa.txt file:
-host a53 -path /home/fernando_luz/SVN/TPN3/casos_testes/02/manager_02/ -wdir /home/fernando_luz/ -l -exitinfo manager_02
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
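For reference, as far as I understand the configfile syntax this should be
equivalent to the single command line below (adding -n 1 for the one manager
process):

mpiexec -l -exitinfo -n 1 -host a53 \
        -path /home/fernando_luz/SVN/TPN3/casos_testes/02/manager_02/ \
        -wdir /home/fernando_luz/ manager_02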
Thanks for the help,
Fernando Luz
    
    