This subroutine creates a simple mesh based on a treeID list. It requires me%global to be set beforehand and works on the communicator specified in me%global%comm.
| Type | Intent | Optional | Attributes | Name | Description |
|---|---|---|---|---|---|
| type(treelmesh_type) | intent(inout) | | | me | Structure to load the mesh to |
| integer(kind=long_k) | intent(in) | | | tIDlist(:) | treeID list to generate the global list in the mesh from |
| integer | intent(in) | optional | | comm | MPI communicator to use; defaults to me%global%comm if not specified |
| character(len=*) | intent(in) | optional | | dirname | Directory to store the mesh in; taken to be me%global%dirname if not specified |
!> Create a simple mesh from a list of treeIDs.
!!
!! Requires me%global to be set up beforehand; all MPI collectives run on
!! the communicator given by `comm` (or me%global%comm when absent), so
!! every rank of that communicator must call this routine.
subroutine treelmesh_fromList(me, tIDlist, comm, dirname)
  ! -------------------------------------------------------------------- !
  !> Structure to load the mesh to
  type(treelmesh_type), intent(inout) :: me
  !> treeID list to generate the global list in the mesh from.
  !! NOTE(review): despite the "Sort first" comment below, no sorting is
  !! performed here — tIDlist is presumably expected to be sorted already;
  !! confirm with callers.
  integer(kind=long_k), intent(in) :: tIDlist(:)
  !> MPI Communicator to use, defaults to the one in me%global%comm if not
  !! specified
  integer, intent(in), optional :: comm
  !> Directory to store the mesh in. Is taken to be me%global%dirname if not
  !! specified
  character(len=*), intent(in), optional :: dirname
  ! -------------------------------------------------------------------- !
  integer :: commLoc                        ! communicator actually used
  integer(kind=long_k) :: offset, nElems    ! prefix offset / element count
  integer :: iError                         ! MPI error code (not checked)
  integer :: nElemsList                     ! number of local treeIDs
  ! mpi variables
  integer :: commsize, rank
  ! -------------------------------------------------------------------- !

  ! Optionally override the mesh directory stored in the global info.
  if (present(dirname)) then
    me%global%dirname = dirname
  end if

  ! Pick the communicator: explicit argument wins over me%global%comm.
  if (present(comm)) then
    commLoc = comm
  else
    commLoc = me%global%comm
  end if

  ! determine number of treeIDs in list
  nElemsList = size(tIDlist)

  ! Get size of communicator and my rank in it
  call mpi_comm_size(commLoc, commsize, iError)
  call mpi_comm_rank(commLoc, rank, iError)
  me%global%nParts = commsize
  me%global%myPart = rank

  ! Release a previously associated property array before re-allocating.
  ! NOTE(review): the remaining allocations below assume the corresponding
  ! components are not yet allocated/associated — confirm callers pass a
  ! fresh (or properly torn-down) mesh structure.
  if (associated(me%property)) deallocate(me%property)
  allocate(me%Property(me%global%nProperties))
  allocate(me%treeID(nElemsList))
  allocate(me%ElemPropertyBits(nElemsList))
  allocate(me%Part_First(commsize))
  allocate(me%Part_Last(commsize))

  ! Assign treeID list to the new mesh. Sort first
  ! (see NOTE above: no sort actually happens in this routine)
  offset = 0
  nElems = int(nElemsList, kind=long_k)

  ! Determine offset for each process (get nElems of ranks before).
  ! On rank 0 the MPI standard leaves the Exscan receive buffer undefined;
  ! offset was pre-initialized to 0 above, which relies on MPI leaving it
  ! untouched there — NOTE(review): confirm this holds for the MPI
  ! implementations in use.
  call MPI_Exscan( nElems, offset, 1, MPI_INTEGER8, MPI_SUM, commLoc, iError )
  me%elemOffset = offset
  me%nElems = nElemsList

  ! Last process offset + its nElems is total number of elements
  nElems = offset + nElems
  ! Distribute among new communicator (broadcast from the last rank,
  ! which is the only one holding the full total after the Exscan)
  call MPI_Bcast( nElems, 1, MPI_INTEGER8, commsize-1, commLoc, iError )

  ! Store in global number of elements
  me%global%nElems = nElems

  ! Copy treeID list to treeID in the mesh
  me%treeID = tIDlist
  ! Reset properties. Not needed
  me%ElemPropertyBits = 0

  ! Exchange Part_First and Part_Last: gather each rank's first and last
  ! treeID so every process knows the partition boundaries.
  ! NOTE(review): me%treeID(1) and me%treeID(nElemsList) are out of bounds
  ! if nElemsList == 0 — this routine appears to assume every rank holds
  ! at least one element; confirm with callers.
  call mpi_allgather( me%treeID(1), &
    &                 1, MPI_INTEGER8, &
    &                 me%Part_First, 1, MPI_INTEGER8, &
    &                 commLoc, iError )

  call mpi_allgather( me%treeID(nElemsList), &
    &                 1, MPI_INTEGER8, &
    &                 me%Part_Last, 1, MPI_INTEGER8, &
    &                 commLoc, iError )

end subroutine treelmesh_fromList