Monday, February 25, 2008

Fortran parallel programming : a job distributor for several processors

subroutine mpi_distrbt(max_ipara,nummpiprocsm,
& MPInumprocs,ipara_ini_fin)
ccccccccccccccccccccccccccccccc
cc This subroutine distributes your total array dimension over the total
cc number of available processors.
cc max_ipara = array dimension (total number of tasks to distribute)
cc nummpiprocsm = maximum number of processors available in the cluster
cc MPInumprocs = number of processors you are using
cc ipara_ini_fin = output: integer, dimension(1:2,0:nummpiprocsm);
cc ipara_ini_fin(1,MPImyid) and ipara_ini_fin(2,MPImyid) are the first and
cc last task indices for the processor whose identification number is MPImyid
cc
cc A typical code demonstrating how to use this is given in the blog.
cc
cc ALL COPYRIGHT IS RESERVED by Tanmoy Das.
cc Created in 2006/2007.
cc
cc I thank Seppo Sharakorpi for creating the basic structure and the
cc algorithm of this code.
ccccccccccccccccccccccc
real :: dummy
integer :: nummpiprocsm,max_ipara
integer,dimension(0:nummpiprocsm) :: isendcounts
integer,dimension(1:2,0:nummpiprocsm) :: ipara_ini_fin
c
C THESE ARE THE START AND FINISH VALUES OF OLD LOOP, NOW PARALLELIZED
c
ipara_ini_tot=1
ipara_fin_tot=max_ipara
write(6,*)
write(6,*)' ipara ranging from ',
& ipara_ini_tot,' to ',ipara_fin_tot
write(6,*)

c broadcast tasks to slaves
nummpitask_tot=ipara_fin_tot-ipara_ini_tot+1
cc
write(6,*)'---------------------------------------------------'

write(6,*) 'MPI MASTER: Scattering total of ',
& nummpitask_tot,' tasks'
write(6,*)'---------------------------------------------------'
cc
if ( nummpitask_tot < MPInumprocs ) then
write(6,*)' SERIOUS ERROR: nummpitask_tot < MPInumprocs'
write(6,*) ipara_ini_tot, ipara_fin_tot, MPInumprocs
endif
c
c
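c first attempt: give each of the (MPInumprocs-1) slaves
c floor(nummpitask_tot/MPInumprocs)+1 tasks and let the master keep
c whatever is left over; if that leaves the master with nothing, the
c more even split in the else branch below is used instead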
dummy=real(nummpitask_tot)/real(MPInumprocs)
islavetasks=floor(dummy)+1
isendcounts(0)=nummpitask_tot-(MPInumprocs-1)*islavetasks
cc
if ( isendcounts(0) > 0 ) then
ipara_ini_fin(1,0)=ipara_ini_tot
ipara_ini_fin(2,0)=ipara_ini_tot+isendcounts(0)-1
cc
do ii=1,(MPInumprocs-1)
isendcounts(ii)=islavetasks
ipara_ini_fin(1,ii)=ipara_ini_fin(2,ii-1)+1
ipara_ini_fin(2,ii)=ipara_ini_fin(1,ii)+isendcounts(ii)-1
end do
cc
write(6,*) isendcounts(0),' tasks for the master'
write(6,*) isendcounts(1),' tasks for each of the ',
& (MPInumprocs-1),' slaves'
write(6,*)
else
c if the number of tasks is only slightly larger than MPInumprocs:
c everyone gets floor(dummy) tasks and the remainder goes to the first slaves
islavetasks=floor(dummy)
isendcounts(0)=islavetasks
ipara_ini_fin(1,0)=ipara_ini_tot
ipara_ini_fin(2,0)=ipara_ini_tot+isendcounts(0)-1
idummy=nummpitask_tot-(MPInumprocs)*islavetasks
cc
do ii=1,(MPInumprocs-1)
isendcounts(ii)=islavetasks
cc
if ( ii <= idummy ) then
isendcounts(ii)=isendcounts(ii)+1
end if
cc
ipara_ini_fin(1,ii)=ipara_ini_fin(2,ii-1)+1
ipara_ini_fin(2,ii)=ipara_ini_fin(1,ii)+isendcounts(ii)-1
end do
cc
write(6,*) isendcounts(0),' tasks for the master,'
write(6,*)isendcounts(1),' tasks for each of the first',
& idummy,' slaves and'
write(6,*)isendcounts(idummy+1),
& ' tasks for each of the other',
& (MPInumprocs-1)-idummy,' slaves'
write(6,*)

end if
cc
return
end subroutine
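
As mentioned in the header comments, a typical use is for every MPI rank to call mpi_distrbt once and then loop only over its own slice of the old loop. The sketch below shows one way to do that; it is not the exact demonstration code from the blog. The program name demo_distrbt, the values chosen for nummpiprocsm and max_ipara, and the empty loop body are placeholders, and it assumes the standard 'mpif.h' include file from your MPI installation and fixed-form compilation (e.g. with mpif90).

      program demo_distrbt
      include 'mpif.h'
c placeholder sizes for this sketch
      integer, parameter :: nummpiprocsm=64
      integer, parameter :: max_ipara=1000
      integer, dimension(1:2,0:nummpiprocsm) :: ipara_ini_fin
      integer :: MPImyid, MPInumprocs, MPIierr, ipara
cc
      call MPI_INIT(MPIierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD,MPImyid,MPIierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD,MPInumprocs,MPIierr)
cc
c every rank builds the same task table
      call mpi_distrbt(max_ipara,nummpiprocsm,
     & MPInumprocs,ipara_ini_fin)
cc
c each processor now works only on its own slice of the old loop
      do ipara=ipara_ini_fin(1,MPImyid),ipara_ini_fin(2,MPImyid)
c ... do the work for index ipara here ...
      end do
cc
      call MPI_FINALIZE(MPIierr)
      end program demo_distrbt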