tem_init_restart_create_types Subroutine

public subroutine tem_init_restart_create_types(me, elemOff, locElems)

This subroutine creates the MPI datatypes used for writing and, if needed, reading the restart files.

Arguments

Type                     Intent   Optional   Attributes   Name
type(tem_restart_type)   inout                            me
integer(kind=long_k)     in                               elemOff
integer                  in                               locElems
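
A minimal usage sketch is given below. The module names in the use statements are assumptions about the surrounding library and only serve to illustrate the call; restart, offset and nElems are hypothetical variables that the caller has to set up beforehand.

  ! Illustrative only: env_module and tem_restart_module are assumed module
  ! names, and restart/offset/nElems are placeholders initialized elsewhere.
  use env_module,         only: long_k
  use tem_restart_module, only: tem_restart_type, tem_init_restart_create_types

  type(tem_restart_type) :: restart
  integer(kind=long_k)   :: offset
  integer                :: nElems

  ! offset: number of elements on all lower ranks, nElems: local element count.
  call tem_init_restart_create_types( me       = restart, &
    &                                 elemOff  = offset,  &
    &                                 locElems = nElems   )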

Calls

mpi_type_contiguous
mpi_type_commit
mpi_type_size
check_mpi_error
tem_abort

Called by

tem_init_restart (itself called from hvs_output_init, which is called from tem_init_tracker)

Source Code

  subroutine tem_init_restart_create_types( me, elemOff, locElems)
    ! -------------------------------------------------------------------- !
    type(tem_restart_type), intent(inout) :: me
    integer(kind=long_k),intent(in)       :: elemOff
    integer,intent(in)                    :: locElems
    ! -------------------------------------------------------------------- !
    ! local variables
    integer                               :: iError
    integer                               :: typesize
    ! -------------------------------------------------------------------- !

    if ( int(me%varMap%nScalars, MPI_OFFSET_KIND)             &
      &  * int(me%write_file%nDofs, MPI_OFFSET_KIND)          &
      &  * int(locelems, MPI_OFFSET_KIND) * 8_MPI_OFFSET_KIND &
      &  >= 2147483648_MPI_OFFSET_KIND                        ) then
      write(logunit(1),*) 'Error: local partition greater than 2GB!'
      write(logunit(1),*) 'Most MPI implementations do not support this.'
      write(logunit(1),*) 'I will abort now, as this will likely result in'
      write(logunit(1),*) 'an error later on anyway.'
      write(logunit(1),*)
      write(logunit(1),*) 'Please make sure to use a sufficient number of'
      write(logunit(1),*) 'processes to reduce the size of local partitions'
      write(logunit(1),*) 'to two GB.'
      write(logunit(1),*) 'Which would be less than ',                         &
        &                 int( real(2147483648_MPI_OFFSET_KIND, kind=rk)       &
        &                      / real( me%varMap%nScalars*me%write_file%nDofs  &
        &                              * 8_MPI_OFFSET_KIND, kind=rk)        ), &
        &                 ' elements for your element size.'
      call tem_abort()
    end if
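    ! Worked example (illustrative numbers, not from the original source):
    ! with nScalars = 5, nDofs = 1 and 8-byte reals, the 2 GiB limit above
    ! allows at most 2147483648 / (5*1*8) = 53687091 elements per process.
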
    ! A contiguous type to describe the vector per element.
    ! MPI_TYPE_CONTIGUOUS(COUNT, OLDTYPE, NEWTYPE, IERROR)
    call MPI_Type_contiguous( me%varMap%nScalars*me%write_file%nDofs, &
      &                       rk_mpi,                                 &
      &                       me%write_file%vectype,                  &
      &                       iError                                  )
    call check_mpi_error( iError,                                             &
      &                   'create contiguous (write) vectype in init_restart' )

    ! Commit the type for creation
    call MPI_Type_commit( me%write_file%vectype, iError )
    call check_mpi_error( iError, 'commit (write) vectype in init_restart' )

    ! Create an MPI contiguous type as ftype for the file view
    call MPI_Type_contiguous( locElems, me%write_file%vectype, &
      &                       me%write_file%ftype, iError      )
    call check_mpi_error( iError,                                           &
      &                   'create contiguous (write) ftype in init_restart' )

    ! commit the new contiguous type
    call MPI_Type_commit( me%write_file%ftype, iError )
    call check_mpi_error( iError, 'commit ftype in init_restart')

    ! get size of element
    call MPI_TYPE_SIZE(me%write_file%vectype, typesize, iError )
    call check_mpi_error(iError,'typesize in init_restart')

    ! set the start of view
    me%write_file%displacement= elemOff * typesize * 1_MPI_OFFSET_KIND

    if (me%read_file%nDofs /= me%write_file%nDofs) then
      if ( int(me%varMap%nScalars, MPI_OFFSET_KIND)             &
        &  * int(me%read_file%nDofs, MPI_OFFSET_KIND)           &
        &  * int(locelems, MPI_OFFSET_KIND) * 8_MPI_OFFSET_KIND &
        &  >= 2147483648_MPI_OFFSET_KIND                        ) then
        write(logunit(1),*) 'Error: local partition from restart greater than 2GB!'
        write(logunit(1),*) 'Most MPI implementations do not support this.'
        write(logunit(1),*) 'I will abort now, as this will likely result in'
        write(logunit(1),*) 'an error later on anyway.'
        write(logunit(1),*)
        write(logunit(1),*) 'Please make sure to use a sufficient number of'
        write(logunit(1),*) 'processes to reduce the size of local partitions'
        write(logunit(1),*) 'to two GB.'
        write(logunit(1),*) 'Which would be less than ',                       &
          &                 int( real(2147483648_MPI_OFFSET_KIND, kind=rk)     &
          &                      / real( me%varMap%nScalars*me%read_file%nDofs &
          &                              * 8_MPI_OFFSET_KIND, kind=rk)      ), &
          &                 ' elements for your elements in the restart file.'
        call tem_abort()
      end if

      ! MPI_TYPE_CONTIGUOUS(COUNT, OLDTYPE, NEWTYPE, IERROR)
      call MPI_Type_contiguous( me%varMap%nScalars*me%read_file%nDofs, &
        &                       rk_mpi,                                &
        &                       me%read_file%vectype,                  &
        &                       iError                                 )
      call check_mpi_error( iError,                                            &
        &                   'create contiguous (read) vectype in init_restart' )

      ! Commit the type for creation
      call MPI_Type_commit( me%read_file%vectype, iError )
      call check_mpi_error( iError, 'commit (read) vectype in init_restart')

      ! Create an MPI contiguous type as ftype for the file view
      call MPI_Type_contiguous( locElems, me%read_file%vectype, &
        &                       me%read_file%ftype, iError      )
      call check_mpi_error( iError,                                          &
        &                   'create contiguous (read) ftype in init_restart' )

      ! commit the new contiguous type
      call MPI_Type_commit( me%read_file%ftype, iError )
      call check_mpi_error( iError, 'commit (read) ftype in init_restart' )

      ! get size of element
      call MPI_TYPE_SIZE(me%read_file%vectype, typesize, iError )
      call check_mpi_error(iError,'typesize in init_restart')

      ! set the start of view
      me%read_file%displacement = elemOff * typesize * 1_MPI_OFFSET_KIND

    else
      me%read_file%vectype = me%write_file%vectype
      me%read_file%ftype = me%write_file%ftype
      me%read_file%displacement=me%write_file%displacement
    end if

  end subroutine tem_init_restart_create_types
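
The routine follows a common MPI pattern: a contiguous datatype over the nScalars*nDofs reals of one element, a second contiguous type spanning all local elements that serves as the file view, and a byte displacement elemOff * typesize marking where the local data starts in the file. The following self-contained sketch reproduces this pattern outside the library; the sizes are made up, and the library's real kind (rk_mpi) is replaced by MPI_DOUBLE_PRECISION, so it illustrates the mechanism rather than the actual implementation.

  program restart_type_sketch
    use mpi
    implicit none

    integer :: vectype, ftype, typesize, iError
    integer :: nScalars, nDofs, locElems
    integer(kind=MPI_OFFSET_KIND) :: elemOff, displacement

    call MPI_Init(iError)

    ! Made-up sizes; in the library they come from me%varMap and me%write_file.
    nScalars = 5
    nDofs    = 1
    locElems = 1000
    elemOff  = 0_MPI_OFFSET_KIND

    ! The library aborts if nScalars*nDofs*locElems*8 bytes reaches 2 GiB,
    ! since many MPI implementations cannot handle larger local partitions.

    ! One contiguous block of reals per element.
    call MPI_Type_contiguous( nScalars*nDofs, MPI_DOUBLE_PRECISION, &
      &                       vectype, iError                       )
    call MPI_Type_commit( vectype, iError )

    ! All local elements as a single contiguous file type for the view.
    call MPI_Type_contiguous( locElems, vectype, ftype, iError )
    call MPI_Type_commit( ftype, iError )

    ! Byte size of one element and the resulting start of the file view.
    call MPI_Type_size( vectype, typesize, iError )
    displacement = elemOff * typesize

    call MPI_Type_free( ftype, iError )
    call MPI_Type_free( vectype, iError )
    call MPI_Finalize( iError )
  end program restart_type_sketch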