dump_treelmesh Subroutine

public subroutine dump_treelmesh(me, root_only)

Write a given mesh to disk. It is stored in the directory given in the tem_global_type.

Dump treelmesh_type%global to header.lua. Dump treeID and propertyBits to elemlist.lsb (little endian environments) or elemlist.msb (big endian environments).
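Each element record in the elemlist file consists of two long_k integers written in native byte order: the treeID followed by the element's property bits. The following sketch is not part of the library; it only illustrates, assuming an 8-byte long_k, a little endian machine (hence the .lsb suffix), and a hypothetical mesh directory, how such a file could be read back serially.

  program read_elemlist_sketch
    ! Sketch only: read the (treeID, propertyBits) pairs that dump_treelmesh
    ! writes. Assumes 8-byte integers and a little endian machine; the path
    ! 'mesh/elemlist.lsb' is a placeholder.
    use, intrinsic :: iso_fortran_env, only: int64
    implicit none
    integer(int64) :: treeID, propertyBits
    integer :: fu, ios

    open( newunit=fu, file='mesh/elemlist.lsb', access='stream', &
      &   form='unformatted', action='read', status='old' )
    do
      read(fu, iostat=ios) treeID, propertyBits
      if (ios /= 0) exit
      write(*,*) 'treeID:', treeID, 'propertyBits:', propertyBits
    end do
    close(fu)
  end program read_elemlist_sketch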

Arguments

type(treelmesh_type), intent(inout) :: me

Mesh to dump to disk

logical, intent(in), optional :: root_only

Root dumps the global mesh when true; each process dumps its own mesh when false.
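A minimal usage sketch follows; the module name treelmesh_module and the mesh setup are assumptions for illustration, while the call itself follows the interface above.

  program dump_mesh_sketch
    ! Sketch only: dump an already constructed mesh. How `mesh` is loaded or
    ! built is omitted; mesh%global%comm must hold the communicator shared by
    ! all participating processes.
    use mpi
    use treelmesh_module, only: treelmesh_type, dump_treelmesh
    implicit none
    type(treelmesh_type) :: mesh
    integer :: iError

    call MPI_Init(iError)

    ! ... load or build `mesh` here ...

    ! Dump the global mesh collectively (root_only defaults to .true.):
    call dump_treelmesh( mesh, root_only = .true. )

    ! Or let each process dump its own local mesh:
    ! call dump_treelmesh( mesh, root_only = .false. )

    call MPI_Finalize(iError)
  end program dump_mesh_sketch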


Calls

dump_treelmesh calls mpi_exscan, mpi_file_open, mpi_file_set_view, mpi_file_write_all, mpi_file_close, mpi_type_contiguous, mpi_type_commit, mpi_type_size, mpi_type_free, check_mpi_error, dump_tem_global, and tem_create_EndianSuffix.

Called by

dump_treelmesh is called directly by tem_adapt_dump_newMesh, tem_dump_subTree, and tem_restart_openWrite, and indirectly by tem_write_debugMesh, hvs_output_init, hvs_output_open, tem_init_tracker, and tem_tracker.

Source Code

  subroutine dump_treelmesh( me, root_only )
    ! -------------------------------------------------------------------- !
    !> Mesh to dump to disk
    type(treelmesh_type), intent(inout) :: me
    !> root dump global mesh when true and
    !! all process dump its own mesh when false
    logical, intent(in), optional       :: root_only
    ! -------------------------------------------------------------------- !
    integer(kind=long_k), allocatable   :: buffer(:)
    integer                             :: fh, etype, ftype
    integer                             :: iostatus( MPI_STATUS_SIZE )
    integer                             :: iElem
    integer                             :: iError
    integer                             :: typesize
    logical                             :: root_not_participating
    character(len=300)                  :: ElemFileName
    character(len=4)                    :: EndianSuffix
    integer(kind=long_k)                :: file_offset
    integer(kind=MPI_OFFSET_KIND)       :: displacement
    ! -------------------------------------------------------------------- !

    if(present(root_only)) then
      root_not_participating = root_only
    else
      root_not_participating = .true.
    endif

    EndianSuffix = tem_create_EndianSuffix()

    ! Dump global info to header.lua
    ! If root_not_participating = .false., each process dumps its own mesh;
    ! set global%nElems to the local nElems to write the local element count
    ! in the header file of each process.
    if (.not. root_not_participating) me%global%nElems = me%nElems

    call dump_tem_global(me%global, root_only)
    ElemFileName = trim(me%global%dirname) // 'elemlist' // EndianSuffix

    file_offset = 0
    allocate( buffer(me%nElems * 2) )

    ! Get the offset for MPI File view
    if(root_not_participating) then
      call MPI_Exscan( int(me%nElems,long_k), file_offset, 1, &
        &              long_k_mpi, MPI_SUM, me%global%comm, iError)
    else
      file_offset = 0
    endif

    ! Fill the buffer with data to be written on disk
    do iElem = 1, me%nElems
      buffer((iElem-1)*2+1) = me%treeID(iElem)
      buffer((iElem-1)*2+2) = me%ElemPropertyBits(iElem)
    end do

    write(logUnit(1),*) 'using MPI to write'

    ! Open the binary file for MPI I/O (Write)
    call MPI_File_open( me%global%comm,                       &
      &                 trim(ElemFileName),                   &
      &                 ior(MPI_MODE_WRONLY,MPI_MODE_CREATE), &
      &                 MPI_INFO_NULL,                        &
      &                 fh, iError                            )

    ! Catch any error raised by MPI
    call check_mpi_error(iError,'file_open in dump_treelmesh')

    ! Create a contiguous type to describe the vector per element
    call MPI_Type_contiguous( 2, long_k_mpi, etype, iError )
    call check_mpi_error(iError,'type etype in dump_treelmesh')
    call MPI_Type_commit(     etype, iError )

    ! Catch MPI Exception(s)
    call check_mpi_error(iError,'commit etype in dump_treelmesh')

    ! Get the size of etype in bytes
    call MPI_Type_size(etype, typesize, iError )
    call check_mpi_error(iError,'typesize in dump_treelmesh')

    ! Create an MPI contiguous type as ftype for the file view
    call MPI_Type_contiguous( me%nElems, etype, ftype, iError )
    call check_mpi_error(iError,'type ftype in dump_treelmesh')
    call MPI_Type_commit(     ftype, iError )


    ! Catch MPI Exception(s)
    call check_mpi_error(iError,'commit ftype in dump_treelmesh')

    ! Calculate the displacement for the file view
    displacement = file_offset * typesize * 1_MPI_OFFSET_KIND

    ! Set the view for each process on the file above
    call MPI_File_set_view( fh, displacement, etype, ftype, "native", &
      &                     MPI_INFO_NULL, iError                     )

    ! Catch MPI Exception(s)
    call check_mpi_error(iError,'set_view in dump_treelmesh')

    ! Write the Data to File
    call MPI_File_write_all( fh, buffer, me%nElems, etype, iostatus, &
      &                      iError                                  )

    ! Catch MPI Exception(s)
    call check_mpi_error(iError,'write_all in dump_treelmesh')

    ! Free the MPI datatypes that were created and close the file
    call MPI_Type_free(etype, iError)
    call check_mpi_error(iError,'free etype in dump_treelmesh')
    call MPI_Type_free(ftype, iError)
    call check_mpi_error(iError,'free ftype in dump_treelmesh')
    call MPI_File_close(fh,    iError)
    call check_mpi_error(iError,'close file in dump_treelmesh')

    deallocate( buffer )

  end subroutine dump_treelmesh