load_treelmesh Subroutine

public subroutine load_treelmesh(me, nParts)

This subroutine reads a mesh in treelm format from disk, using the directory name stored in me%global%dirname.

If the total element list is small (2*nElems entries fit into io_buffer_size), it is read by a single process and distributed to all other processes via MPI_Bcast, as that is probably faster than parallel file access. Larger meshes are read collectively, with each process reading its own partition through MPI I/O.
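
A minimal calling sketch follows. The module name treelmesh_module and the header-initialization step noted in the comments are assumptions for illustration; within the library this routine is normally invoked through load_tem (see "Called by" below).

  program load_mesh_sketch
    use mpi
    use treelmesh_module, only: treelmesh_type, load_treelmesh
    implicit none

    type(treelmesh_type) :: mesh
    integer :: nParts, iError

    call MPI_Init(iError)
    call MPI_Comm_size(MPI_COMM_WORLD, nParts, iError)
    ! Assumed precondition: mesh%global (dirname, comm, myPart, nElems,
    ! nProperties, ...) and mesh%nElems / mesh%elemOffset are already set,
    ! e.g. by reading the mesh header, before this call.
    call load_treelmesh(mesh, nParts)
    call MPI_Finalize(iError)
  end program load_mesh_sketch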

Arguments

type(treelmesh_type), intent(inout) :: me

Structure to load the mesh to

integer, intent(in) :: nParts

Number of partitions the mesh is partitioned into (= number of MPI processes in comm).


Calls

load_treelmesh calls: MPI_Allgather, MPI_Bcast, MPI_File_close, MPI_File_open, MPI_File_read_all, MPI_File_set_view, MPI_Type_commit, MPI_Type_contiguous, MPI_Type_free, MPI_Type_size, check_mpi_error, gather_property, tem_abort, tem_create_EndianSuffix, and tem_open.

Called by

load_treelmesh is called by load_tem; load_tem in turn is called by tem_restart_readHeader, which is called by tem_load_restart.

Source Code

  subroutine load_treelmesh(me, nParts )
    ! -------------------------------------------------------------------- !
    !> Structure to load the mesh to
    type(treelmesh_type), intent(inout) :: me
    ! Directory containing the mesh information
    ! (former argument, now taken from me%global%dirname):
    ! character(len=*), intent(in) :: dirname
    ! Partition to use on the calling process (= MPI rank in comm;
    ! former argument, now taken from me%global%myPart):
    ! integer, intent(in) :: myPart
    !> Number of partitions the mesh is partitioned into (= number of MPI
    !! processes in comm).
    integer, intent(in) :: nParts
    ! -------------------------------------------------------------------- !
    logical :: ex
    integer :: iElem
    integer :: iProp
    integer :: fh, etype, ftype
    integer :: iostatus( MPI_STATUS_SIZE )
    integer :: iError
    integer :: file_rec_len
    integer :: typesize
    character(len=300)   :: ElemFileName
    character(len=4)     :: EndianSuffix
    integer(kind=long_k), allocatable :: buffer(:)
    integer(kind=long_k), allocatable :: globbuffer(:)
    integer(kind=MPI_OFFSET_KIND)     :: displacement
    ! -------------------------------------------------------------------- !

    EndianSuffix = tem_create_EndianSuffix()
    ElemFileName = trim(me%global%dirname)//'elemlist'//EndianSuffix
    write(logUnit(1), *) 'Load mesh from file: '//trim(ElemFileName)

    if (associated(me%property)) deallocate(me%property)
    allocate(me%Property(me%global%nProperties))
    allocate(me%Part_First(nParts))
    allocate(me%Part_Last(nParts))

    allocate(me%treeID(me%nElems))

    if (me%global%myPart == 0) then
      ! Only one process should check the file existence
      inquire(file=trim(ElemFileName), exist=ex)
      if (.not. ex) then
        write(logUnit(1),*) 'ERROR: File ' // trim(ElemFileName) &
          &                 // ' not found!'
        write(logUnit(1),*) 'Aborting.'
        call tem_abort()
      end if
    end if

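    ! Each element is stored on disk as two long integers: its treeID
    ! followed by its property bits, hence 2*nElems entries in the buffer.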
    allocate(me%ElemPropertyBits(me%nElems))
    allocate(buffer(me%nElems*2))
    if (me%global%nElems*2 > io_buffer_size) then
      write(logUnit(1),*) 'using MPI to read'

      ! Create a contiguous type to describe the vector per element
      call MPI_Type_contiguous( 2, long_k_mpi, etype, iError )
      call check_mpi_error(iError,'type etype in load_treelmesh')
      call MPI_Type_commit(     etype, iError )
      call check_mpi_error(iError,'commit etype in load_treelmesh')
      call MPI_Type_size(etype, typesize, iError )
      call check_mpi_error(iError,'typesize in load_treelmesh')

      ! Create a contiguous type as ftype for the file view
      call MPI_Type_contiguous( me%nElems, etype, ftype, iError )
      call check_mpi_error(iError,'type ftype in load_treelmesh')
      call MPI_Type_commit(     ftype, iError )
      call check_mpi_error(iError,'commit ftype in load_treelmesh')

      ! Calculate displacement for file view
      displacement = me%elemOffset * typesize * 1_MPI_OFFSET_KIND

      ! Open the binary file for MPI I/O (read-only)
      call MPI_File_open( me%global%comm, trim(ElemFileName),        &
        &                 MPI_MODE_RDONLY, MPI_INFO_NULL, fh, iError )
      call check_mpi_error(iError,'file_open in load_treelmesh')

      ! Set the view for each process on the file opened above
      call MPI_File_set_view( fh, displacement, etype, ftype, "native", &
        &                     MPI_INFO_NULL, iError                     )
      call check_mpi_error(iError,'set_view in load_treelmesh')
      ! Read data from the file
      call MPI_File_read_all( fh, buffer, me%nElems, etype, iostatus, iError )
      call check_mpi_error(iError,'read_all in load_treelmesh')

      ! Free the MPI_Datatypes which were created and close the file
      call MPI_Type_free (etype, iError)
      call check_mpi_error(iError,'free etype in load_treelmesh')
      call MPI_Type_free (ftype, iError)
      call check_mpi_error(iError,'free ftype in load_treelmesh')
      call MPI_File_close(fh,    iError)
      call check_mpi_error(iError,'close file in load_treelmesh')

      ! END IO-part

    else

      ! The mesh is so small, it probably is better to read it on one
      ! process and distribute the resulting data via the network.
      write(logUnit(1),*) 'using one process to read and broadcast'
      allocate( globbuffer(me%global%nElems*2) )
      if (me%global%myPart == 0) then ! single proc read

        inquire(iolength = file_rec_len) globbuffer
        call tem_open( newunit = fh,                 &
          &            file    = trim(ElemFileName), &
          &            recl    = file_rec_len,       &
          &            action  = 'read',             &
          &            access  = 'direct',           &
          &            form    = 'unformatted'       )

        read(fh, rec=1) globbuffer

        close(fh)

      end if ! single proc read

      call MPI_Bcast( globbuffer, int(2*me%global%nElems), long_k_mpi, 0, &
        &             me%global%comm, iError                              )

      buffer = globbuffer(me%elemOffset*2+1:(me%ElemOffset+me%nElems)*2)
      deallocate(globbuffer)

    end if ! END IO-part

    ! Fill the tree with what was read in buffer from disk
    do iElem = 1, me%nElems
      me%treeID(iElem)           = buffer( (iElem-1)*2 + 1 )
      me%ElemPropertyBits(iElem) = buffer( (iElem-1)*2 + 2 )
    end do

    deallocate( buffer )

    ! clear the prp_sendHalo property since the distribution of elements
    ! might have changed
    do iElem=1,me%nElems
      me%ElemPropertyBits(iElem)  = ibclr( me%ElemPropertyBits(iElem), &
        &                                  prp_sendHalo                )
    end do

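    ! Gather the first and last treeID of every partition, so each process
    ! knows the element range held by every other partition.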
    call MPI_allgather( me%treeID(1), 1, long_k_mpi,  &
      &                 me%Part_First, 1, long_k_mpi, &
      &                 me%global%comm, iError        )
    call MPI_allgather( me%treeID(me%nElems), 1, long_k_mpi, &
      &                 me%Part_Last, 1, long_k_mpi,         &
      &                 me%global%comm, iError               )

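    ! Build the per-property element lists from the gathered property bits.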
    do iProp=1,me%global%nProperties
      call gather_Property( Property = me%Property(iProp),        &
        &                   Header   = me%global%Property(iProp), &
        &                   BitField = me%ElemPropertyBits,       &
        &                   comm     = me%global%comm             )
    end do

    write(logUnit(1),*) 'Done reading the mesh!'

  end subroutine load_treelmesh