diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90
index 5c8a598af..fb4a88542 100644
--- a/src/MOD_PARTIT.F90
+++ b/src/MOD_PARTIT.F90
@@ -47,10 +47,10 @@ module MOD_PARTIT
     integer                            :: eDim_nod2D
     integer, allocatable, dimension(:) :: myList_nod2D
-    integer                            :: myDim_elem2D
+    integer                            :: myDim_elem2D, myDim_elem2D_shrinked
     integer                            :: eDim_elem2D
     integer                            :: eXDim_elem2D
-    integer, allocatable, dimension(:) :: myList_elem2D
+    integer, allocatable, dimension(:) :: myList_elem2D, myInd_elem2D_shrinked
     integer                            :: myDim_edge2D
     integer                            :: eDim_edge2D
@@ -75,6 +75,7 @@ module MOD_PARTIT
     integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:)
     integer :: MPIERR
+    !!! remPtr_* are constructed at runtime and shall not be dumped!!!
     integer, allocatable :: remPtr_nod2D(:),  remList_nod2D(:)
     integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:)
diff --git a/src/fesom_module.F90 b/src/fesom_module.F90
index 422aada79..fea998944 100755
--- a/src/fesom_module.F90
+++ b/src/fesom_module.F90
@@ -89,6 +89,9 @@ module fesom_module
 subroutine fesom_init(fesom_total_nsteps)
     use fesom_main_storage_module
+#if defined(__MULTIO)
+    use iom
+#endif
     integer, intent(out) :: fesom_total_nsteps
     ! EO parameters
     logical mpi_is_initialized
@@ -243,6 +246,10 @@ subroutine fesom_init(fesom_total_nsteps)
        write(*,*) '============================================'
     endif
+#if defined(__MULTIO)
+    call iom_send_fesom_domains(f%partit, f%mesh)
+#endif
+
     ! f%dump_dir='DUMP/'
     ! INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS)
     ! if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir))
diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90
index 03aba0b48..1fdfd7779 100644
--- a/src/ifs_interface/ifs_interface.F90
+++ b/src/ifs_interface/ifs_interface.F90
@@ -9,6 +9,46 @@ MODULE nemogcmcoup_steps
    INTEGER :: substeps !per IFS timestep
 END MODULE nemogcmcoup_steps
+#if defined(__MULTIO)
+SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver, irequired, iprovided, lmpi1)
+
+   ! Initialize the NEMO mppio server
+   USE mpp_io
+
+   IMPLICIT NONE
+   INTEGER :: icomm
+   LOGICAL :: lnemoioserver
+   INTEGER :: irequired, iprovided
+   LOGICAL :: lmpi1
+
+   CALL mpp_io_init(icomm, lnemoioserver, irequired, iprovided, lmpi1)
+END SUBROUTINE nemogcmcoup_init_ioserver
+
+
+SUBROUTINE nemogcmcoup_init_ioserver_2( icomm )
+   ! Initialize the NEMO mppio server
+   USE mpp_io
+
+   IMPLICIT NONE
+   INTEGER :: icomm
+
+   CALL mpp_io_init_2( icomm )
+   IF (lioserver) THEN
+      ! IO server finished, clean up multio objects
+      CALL mpp_stop()
+   ENDIF
+END SUBROUTINE nemogcmcoup_init_ioserver_2
+
+SUBROUTINE nemogcmcoup_end_ioserver
+   ! This function is only called for the IO client.
+   USE mpp_io
+
+   IMPLICIT NONE
+
+   CALL mpp_stop()
+END SUBROUTINE nemogcmcoup_end_ioserver
+#endif
+
 SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, &
    &                        lwaveonly, iatmunit, lwrite )
diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90
index bc711a8c6..0beda3670 100644
--- a/src/ifs_interface/ifs_notused.F90
+++ b/src/ifs_interface/ifs_notused.F90
@@ -3,33 +3,6 @@
 !
 ! -Original code by Kristian Mogensen, ECMWF.
-SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver )
-
-   ! Initialize the NEMO mppio server
-
-   IMPLICIT NONE
-   INTEGER :: icomm
-   LOGICAL :: lnemoioserver
-
-   WRITE(*,*)'No mpp_ioserver'
-   CALL abort
-
-END SUBROUTINE nemogcmcoup_init_ioserver
-
-
-SUBROUTINE nemogcmcoup_init_ioserver_2( icomm )
-
-   ! Initialize the NEMO mppio server
-
-   IMPLICIT NONE
-   INTEGER :: icomm
-
-   WRITE(*,*)'No mpp_ioserver'
-   CALL abort
-
-END SUBROUTINE nemogcmcoup_init_ioserver_2
-
-
 SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, &
    &                              nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d )
@@ -332,16 +305,3 @@ SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, &
 END SUBROUTINE nemogcmcoup_wam_update_stress
-SUBROUTINE nemogcmcoup_end_ioserver
-
-   ! Close io servers
-
-   IMPLICIT NONE
-   INTEGER :: icomm
-   LOGICAL :: lnemoioserver
-
-   WRITE(*,*)'No mpp_ioserver'
-   CALL abort
-
-END SUBROUTINE nemogcmcoup_end_ioserver
-
diff --git a/src/ifs_interface/iom.F90 b/src/ifs_interface/iom.F90
new file mode 100644
index 000000000..27ec77942
--- /dev/null
+++ b/src/ifs_interface/iom.F90
@@ -0,0 +1,412 @@
+!=====================================================
+! Input/Output manager : Library to write output files
+!
+! -Original code for NEMOv40 by ECMWF.
+! -Adapted to FESOM2 by Razvan Aguridan, ECMWF, 2023.
+!-----------------------------------------------------
+
+MODULE iom
+#if defined(__MULTIO)
+    USE multio_api
+    USE, INTRINSIC :: iso_fortran_env, only: real64
+
+    IMPLICIT NONE
+    PRIVATE
+
+    TYPE(multio_handle) :: mio_handle
+    INTEGER(8), PRIVATE :: mio_parent_comm
+
+    PUBLIC iom_initialize, iom_init_server, iom_finalize
+    PUBLIC iom_send_fesom_domains
+    PUBLIC iom_field_request, iom_send_fesom_data
+
+    PRIVATE ctl_stop
+    !!----------------------------------------------------------------------
+    !! NEMO/OCE 4.0 , NEMO Consortium (2018)
+    !! $Id: iom.F90 13297 2020-07-13 08:01:58Z andmirek $
+    !! Software governed by the CeCILL license (see ./LICENSE)
+    !!----------------------------------------------------------------------
+
+    TYPE iom_field_request
+        CHARACTER(100)                      :: name = REPEAT(" ", 100)
+        CHARACTER(5)                        :: gridType = REPEAT(" ", 5)
+        REAL(real64), DIMENSION(:), POINTER :: values => NULL()
+        INTEGER                             :: globalSize = 0
+        INTEGER                             :: level = 0
+        INTEGER                             :: step = 0
+    END TYPE
+
+CONTAINS
+
+    SUBROUTINE multio_custom_error_handler(context, err)
+        USE mpi
+
+        IMPLICIT NONE
+        INTEGER(8), INTENT(INOUT) :: context ! Use mpi communicator as context
+        INTEGER, INTENT(IN)       :: err
+        INTEGER                   :: mpierr
+
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop( 'MULTIO ERROR: ', multio_error_string(err))
+            IF (context /= MPI_UNDEFINED) THEN
+                CALL mpi_abort(int(context), MPI_ERR_OTHER, mpierr)
+                context = MPI_UNDEFINED
+            ENDIF
+        ENDIF
+    END SUBROUTINE
+
+
+    SUBROUTINE iom_initialize(client_id, local_comm, return_comm, global_comm )
+        USE mpi
+
+        IMPLICIT NONE
+        CHARACTER(LEN=*), INTENT(IN)   :: client_id
+        INTEGER, INTENT(IN),  OPTIONAL :: local_comm
+        INTEGER, INTENT(OUT), OPTIONAL :: return_comm
+        INTEGER, INTENT(IN),  OPTIONAL :: global_comm
+        TYPE(multio_configuration) :: conf_ctx
+        INTEGER :: err
+        CHARACTER(len=16) :: err_str
+
+        mio_parent_comm = mpi_comm_world
+
+        err = multio_initialise()
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('Initializing multio failed: ', multio_error_string(err))
+        END IF
+
+        IF (PRESENT(global_comm)) THEN
+            mio_parent_comm = global_comm
+        ENDIF
+
+        ! Prepare context and check errors explicitly until everything is set up - then the failure handler is used
+        BLOCK
+            CHARACTER(:), allocatable :: config_file
+            INTEGER :: config_file_length
+
+            CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', length=config_file_length)
+            IF (config_file_length == 0) THEN
+                call ctl_stop('The fesom plan file is not correctly set!')
+                err = conf_ctx%new()
+            ELSE
+                ALLOCATE(character(len=config_file_length + 1) :: config_file)
+
+                CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', config_file)
+                err = conf_ctx%new(config_file)
+
+                DEALLOCATE(config_file)
+            ENDIF
+        END BLOCK
+
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('Creating multio configuration context failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%mpi_allow_world_default_comm(.FALSE._1)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('conf_ctx%mpi_allow_world_default_comm(.FALSE._1) failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%mpi_client_id(client_id)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('conf_ctx%mpi_client_id(', TRIM(client_id),') failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%mpi_return_client_comm(return_comm)
+        IF (err /= MULTIO_SUCCESS) THEN
+            WRITE (err_str, "(I10)") return_comm
+            CALL ctl_stop('conf_ctx%mpi_return_client_comm(', err_str,') failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%mpi_parent_comm(int(mio_parent_comm))
+        IF (err /= MULTIO_SUCCESS) THEN
+            WRITE (err_str, "(I10)") mio_parent_comm
+            CALL ctl_stop('conf_ctx%mpi_parent_comm(', err_str,') failed: ', multio_error_string(err))
+        END IF
+
+        err = mio_handle%new(conf_ctx)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('mio_handle%new(conf_ctx) failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%delete()
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('conf_ctx%delete() failed: ', multio_error_string(err))
+        END IF
+
+        ! Set a failure handler that reacts to interface problems or exceptions that are not handled within the interface
+        err = multio_set_failure_handler(multio_custom_error_handler, mio_parent_comm)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('setting multio failure handler failed: ', multio_error_string(err))
+        END IF
+
+        err = mio_handle%open_connections();
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('mio_handle%open_connections failed: ', multio_error_string(err))
+        END IF
+    END SUBROUTINE iom_initialize
+
+    SUBROUTINE iom_finalize()
+        IMPLICIT NONE
+        INTEGER :: err
+
+        err = mio_handle%close_connections();
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('mio_handle%close_connections failed: ', multio_error_string(err))
+        END IF
+
+        err = mio_handle%delete();
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('mio_handle%delete failed: ', multio_error_string(err))
+        END IF
+    END SUBROUTINE iom_finalize
+
+    SUBROUTINE iom_init_server(server_comm)
+        IMPLICIT NONE
+        INTEGER, INTENT(IN) :: server_comm
+        type(multio_configuration) :: conf_ctx
+        INTEGER :: err
+        CHARACTER(len=16) :: err_str
+
+        mio_parent_comm = server_comm
+
+        err = multio_initialise()
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('Initializing multio failed: ', multio_error_string(err))
+        END IF
+
+        ! Prepare context and check errors explicitly until everything is set up - then the failure handler is used
+
+        BLOCK
+            CHARACTER(:), allocatable :: config_file
+            INTEGER :: config_file_length
+
+            CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', length=config_file_length)
+            IF (config_file_length == 0) THEN
+                err = conf_ctx%new()
+            ELSE
+                ALLOCATE(character(len=config_file_length + 1) :: config_file)
+
+                CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', config_file)
+                err = conf_ctx%new(config_file)
+
+                DEALLOCATE(config_file)
+            ENDIF
+        END BLOCK
+
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('Creating multio server configuration context failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%mpi_allow_world_default_comm(.FALSE._1)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('conf_ctx%mpi_allow_world_default_comm(.FALSE._1) failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%mpi_parent_comm(int(mio_parent_comm))
+        IF (err /= MULTIO_SUCCESS) THEN
+            WRITE (err_str, "(I10)") mio_parent_comm
+            CALL ctl_stop('conf_ctx%mpi_parent_comm(', err_str,') failed: ', multio_error_string(err))
+        END IF
+
+        ! Set a failure handler that reacts to interface problems or exceptions that are not handled within the interface
+        ! Set the handler before invoking the blocking start-server call
+        err = multio_set_failure_handler(multio_custom_error_handler, mio_parent_comm)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('setting multio failure handler failed: ', multio_error_string(err))
+        END IF
+
+        ! Blocking call
+        err = multio_start_server(conf_ctx)
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('multio_start_server(conf_ctx) failed: ', multio_error_string(err))
+        END IF
+
+        err = conf_ctx%delete()
+        IF (err /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('conf_ctx%delete() failed: ', multio_error_string(err))
+        END IF
+    END SUBROUTINE iom_init_server
+
+    SUBROUTINE iom_send_fesom_domains(partit, mesh)
+        USE MOD_MESH
+        USE MOD_PARTIT
+
+        IMPLICIT NONE
+
+        TYPE(multio_metadata) :: md
+        INTEGER :: cerr
+        INTEGER :: elem, elnodes(3), aux
+        TYPE(t_partit), INTENT(IN), TARGET :: partit
+        TYPE(t_mesh), intent(in), TARGET :: mesh
+        INTEGER, DIMENSION(:), POINTER :: temp
+
+#include "../associate_part_def.h"
+#include "../associate_mesh_def.h"
+#include "../associate_part_ass.h"
+#include "../associate_mesh_ass.h"
+
+        cerr = md%new()
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%new() failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("name", "ngrid")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%set_string(name) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("category", "fesom-domain-nodemap")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%set_string(category) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("representation", "unstructured")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%set_string(representation) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_int("globalSize", mesh%nod2D)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%set_int(globalSize) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_bool("toAllServers", .TRUE._1)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%set_bool(toAllServers) failed: ', multio_error_string(cerr))
+        END IF
+
+        temp => partit%myList_nod2D(1:partit%myDim_nod2D)
+        cerr = mio_handle%write_domain(md, temp - 1)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, mio_handle%write_domain() failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%delete()
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: ngrid, md%delete() failed: ', multio_error_string(cerr))
+        END IF
+
+        ! declare grid at elements
+        cerr = md%new()
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%new() failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("name", "egrid")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%set_string(name) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("category", "fesom-domain-elemmap")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%set_string(category) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("representation", "unstructured")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%set_string(representation) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_int("globalSize", mesh%elem2D)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%set_int(globalSize) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_bool("toAllServers", .TRUE._1)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%set_bool(toAllServers) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = mio_handle%write_domain(md, partit%myList_elem2D(partit%myInd_elem2D_shrinked) - 1)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, mio_handle%write_domain() failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%delete()
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_domains: egrid, md%delete() failed: ', multio_error_string(cerr))
+        END IF
+    END SUBROUTINE iom_send_fesom_domains
+
+    SUBROUTINE iom_send_fesom_data(data)
+        IMPLICIT NONE
+
+        TYPE(iom_field_request), INTENT(INOUT) :: data
+        INTEGER :: cerr
+        TYPE(multio_metadata) :: md
+
+        cerr = md%new()
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%new() failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("category", "fesom-grid-output")
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_string(category) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_int("globalSize", data%globalSize)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_int(globalSize) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_int("level", data%level)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_int(level) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_bool("toAllServers", .FALSE._1)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_bool(toAllServers) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("name", trim(data%name))
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_string(name) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("gridSubType", data%gridType)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_string(gridSubType) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_string("domain", data%gridType)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_string(domain) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%set_int("step", data%step)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%set_int(step) failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = mio_handle%write_field(md, data%values)
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: mio_handle%write_field failed: ', multio_error_string(cerr))
+        END IF
+
+        cerr = md%delete()
+        IF (cerr /= MULTIO_SUCCESS) THEN
+            CALL ctl_stop('send_fesom_data: md%delete failed: ', multio_error_string(cerr))
+        END IF
+    END SUBROUTINE
+
+    SUBROUTINE ctl_stop(m1, m2, m3, m4)
+        USE mpi
+
+        IMPLICIT NONE
+        CHARACTER(len=*), INTENT(in), OPTIONAL :: m1, m2, m3, m4
+        INTEGER :: dummy
+
+        IF ( PRESENT(m1) ) WRITE(*,*) m1
+        IF ( PRESENT(m2) ) WRITE(*,*) m2
+        IF ( PRESENT(m3) ) WRITE(*,*) m3
+        IF ( PRESENT(m4) ) WRITE(*,*) m4
+
+        CALL mpi_abort(mpi_comm_world, 1, dummy)
+    END SUBROUTINE ctl_stop
+
+    !!======================================================================
+#endif
+END MODULE iom
diff --git a/src/ifs_interface/mpp_io.F90 b/src/ifs_interface/mpp_io.F90
new file mode 100644
index 000000000..eda8feae7
--- /dev/null
+++ b/src/ifs_interface/mpp_io.F90
@@ -0,0 +1,182 @@
+!=====================================================
+! Ocean output initialisation.
+!
+! -Original code for NEMOv40 by Kristian Mogensen, ECMWF.
+! -Adapted to FESOM2 by Razvan Aguridan, ECMWF, 2023.
+!-----------------------------------------------------
+
+MODULE mpp_io
+#if defined(__MULTIO)
+    USE iom
+    IMPLICIT NONE
+    PRIVATE
+
+    PUBLIC &
+        & mpp_io_init, &
+        & mpp_io_init_2, &
+        & mpp_stop
+
+    INTEGER :: ntask_multio = 0
+    INTEGER :: ntask_xios   = 0
+    LOGICAL, PUBLIC :: lioserver, lmultioserver, lmultiproc
+    INTEGER :: ntask_notio
+    INTEGER, SAVE :: mppallrank, mppallsize, mppiorank, mppiosize
+    INTEGER, SAVE :: mppmultiorank, mppmultiosize
+    INTEGER, SAVE :: mppcomprank, mppcompsize
+    INTEGER, SAVE :: pcommworld, pcommworldmultio
+
+    CONTAINS
+
+    SUBROUTINE mpp_io_init( iicomm, lio, irequired, iprovided, lmpi1 )
+
+        INCLUDE "mpif.h"
+        INTEGER, INTENT(INOUT) :: iicomm
+        LOGICAL, INTENT(INOUT) :: lio
+        INTEGER, INTENT(INOUT) :: irequired, iprovided
+        LOGICAL, INTENT(IN)    :: lmpi1
+
+        INTEGER :: icode, ierr, icolor
+        LOGICAL :: mpi_called
+        CHARACTER(len=128) :: cdlogfile
+        INTEGER :: ji
+        NAMELIST/namio/ntask_multio,ntask_xios
+
+        CALL mpi_initialized( mpi_called, icode )
+        IF ( icode /= MPI_SUCCESS ) THEN
+            WRITE(*,*)' mpp_io_init: Error in routine mpi_initialized'
+            CALL mpi_abort( mpi_comm_world, icode, ierr )
+        ENDIF
+
+        IF( mpi_called ) THEN
+            WRITE(*,*)' mpp_io_init assumes that it is initialising MPI'
+            CALL mpi_abort( mpi_comm_world, 1, ierr )
+        ENDIF
+
+        IF (lmpi1) THEN
+            CALL mpi_init( icode )
+        ELSE
+#ifdef MPI1
+            WRITE(0,*)'mpp_io_init:'
+            WRITE(0,*)'MPI1 defined but lmpi1 is false'
+            CALL abort
+#else
+            CALL mpi_init_thread(irequired,iprovided,icode)
+#endif
+        ENDIF
+
+        IF ( icode /= MPI_SUCCESS ) THEN
+            WRITE(*,*)' mpp_io_init: Error in routine mpi_init'
+            CALL mpi_abort( mpi_comm_world, icode, ierr )
+        ENDIF
+
+        CALL mpi_comm_rank( mpi_comm_world, mppallrank, ierr )
+        CALL mpi_comm_size( mpi_comm_world, mppallsize, ierr )
+
+        OPEN(10,file='namio.in')
+        READ(10,namio)
+        WRITE(*,namio)
+        CLOSE(10)
+
+        IF ( ntask_xios + ntask_multio == 0 ) THEN
+            iicomm = mpi_comm_world
+            lio=.FALSE.
+            RETURN
+        ENDIF
+
+        ntask_notio = mppallsize - ntask_xios - ntask_multio
+        IF ((mppallrank+1)<=ntask_notio) THEN
+            icolor=1
+            lioserver=.FALSE.
+            lmultioserver=.FALSE.
+        ELSE
+            icolor=3
+            lioserver=.TRUE.
+            lmultioserver=.TRUE.
+        ENDIF
+        lio=lioserver
+
+        CALL mpi_comm_split( mpi_comm_world, icolor, 0, iicomm, icode )
+        IF ( icode /= MPI_SUCCESS ) THEN
+            WRITE(*,*)' mpp_io_init: Error in routine mpi_comm_split'
+            CALL mpi_abort( mpi_comm_world, icode, ierr )
+        ENDIF
+        IF (lioserver) THEN
+            CALL mpi_comm_rank( iicomm, mppiorank, ierr )
+            CALL mpi_comm_size( iicomm, mppiosize, ierr )
+            WRITE(cdlogfile,'(A,I4.4,A)')'nemo_io_server.',mppiorank,'.log'
+        ELSE
+            mppiorank=0
+            mppiosize=0
+        ENDIF
+        lio=lioserver
+
+    END SUBROUTINE mpp_io_init
+
+    SUBROUTINE mpp_io_init_2( iicomm )
+
+        INTEGER, INTENT(INOUT) :: iicomm
+
+        INTEGER :: icode, ierr, icolor, iicommx, iicommm, iicommo
+        INTEGER :: ji,inum
+        LOGICAL :: lcompp
+        INCLUDE "mpif.h"
+
+        ! Construct multio server communicator
+
+        IF (lmultioserver.OR..NOT.lioserver) THEN
+            icolor=12
+        ELSE
+            icolor=13
+        ENDIF
+
+        CALL mpi_comm_split( iicomm, icolor, 0, pcommworldmultio, icode )
+        IF ( icode /= MPI_SUCCESS ) THEN
+            WRITE(*,*)' mpp_io_init2: Error in routine mpi_comm_split'
+            CALL mpi_abort( mpi_comm_world, icode, ierr )
+        ENDIF
+
+        CALL mpi_comm_rank( pcommworldmultio, mppmultiorank, ierr )
+        CALL mpi_comm_size( pcommworldmultio, mppmultiosize, ierr )
+
+        ! Construct compute communicator
+
+        IF (.NOT.lioserver) THEN
+            icolor=14
+            lcompp=.TRUE.
+        ELSE
+            icolor=15
+            lcompp=.FALSE.
+        ENDIF
+
+        CALL mpi_comm_split( iicomm, icolor, 0, iicommo, icode )
+        IF ( icode /= MPI_SUCCESS ) THEN
+            WRITE(*,*)' mpp_io_init2: Error in routine mpi_comm_split'
+            CALL mpi_abort( mpi_comm_world, icode, ierr )
+        ENDIF
+
+        CALL mpi_comm_rank( iicommo, mppcomprank, ierr )
+        CALL mpi_comm_size( iicommo, mppcompsize, ierr )
+
+        IF (.NOT.lioserver) THEN
+            CALL iom_initialize( "for_xios_mpi_id", return_comm=iicommm, global_comm = pcommworldmultio ) ! nemo local communicator given by xios
+        ELSE
+            ! For io-server tasks, start and run the right server
+            CALL iom_init_server( server_comm = pcommworldmultio )
+        ENDIF
+
+        ! Return to the model with iicomm holding the compute-only tasks
+        iicomm = iicommo
+
+    END SUBROUTINE mpp_io_init_2
+
+    SUBROUTINE mpp_stop
+        INTEGER :: ierr
+
+        IF (.NOT.lioserver) THEN
+            call iom_finalize()
+        ENDIF
+
+        CALL mpi_finalize( ierr )
+    END SUBROUTINE mpp_stop
+#endif
+END MODULE mpp_io
diff --git a/src/io_meandata.F90 b/src/io_meandata.F90
index 1edb8067c..6ff0dce80 100644
--- a/src/io_meandata.F90
+++ b/src/io_meandata.F90
@@ -20,6 +20,8 @@ module io_MEANDATA
   type(t_partit), pointer :: p_partit
   integer :: ndim
   integer :: glsize(2)
+  integer :: shrinked_size
+  integer, allocatable, dimension(:) :: shrinked_indx
   integer :: accuracy
   real(real64), allocatable, dimension(:,:) :: local_values_r8
   real(real32), allocatable, dimension(:,:) :: local_values_r4
@@ -1127,6 +1129,9 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
  use MOD_ICE
  use mod_tracer
  use io_gather_module
+#if defined(__MULTIO)
+ use iom
+#endif
#if defined (__icepack)
  use icedrv_main, only: init_io_icepack
#endif
@@ -1142,11 +1147,10 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
  type(t_tracer), intent(in)   , target :: tracers
  type(t_dyn)   , intent(in)   , target :: dynamics
  type(t_ice)   , intent(inout), target :: ice
-
-  character(:), allocatable :: filepath
-  real(real64) :: rtime !timestamp of the record
+ character(:), allocatable :: filepath
+ real(real64) :: rtime !timestamp of the record
-  ctime=timeold+(dayold-1.)*86400
+ctime=timeold+(dayold-1.)*86400
 !___________________________________________________________________________
  if (lfirst) then
@@ -1164,14 +1168,18 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
  !___________________________________________________________________________
  !PS if (partit%flag_debug .and. partit%mype==0) print *, achar(27)//'[33m'//' -I/O-> call update_means'//achar(27)//'[0m'
  call update_means
- !___________________________________________________________________________
  ! loop over defined streams
  do n=1, io_NSTREAMS
     !_______________________________________________________________________
     ! make pointer for entry onto io_stream object
     entry=>io_stream(n)
-
+!#if defined(__MULTIO)
+!   call mio_write_nod(mio, entry)
+!   lfirst=.false.
+!   return
+!#endif
+
     !_______________________________________________________________________
     !check whether output will be written based on event frequency
     do_output=.false.
@@ -1196,6 +1204,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
     ! if its time for output --> do_output==.true.
     if (do_output) then
        if (vec_autorotate) call io_r2g(n, partit, mesh) ! automatically detect if a vector field and rotate if makes sense!
+#if !defined(__MULTIO)
        if(entry%thread_running) call entry%thread%join()
        entry%thread_running = .false.
@@ -1248,7 +1257,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
           entry%rec_count=max(entry%rec_count, 1)
           write(*,*) trim(entry%name)//': current mean I/O counter = ', entry%rec_count
        end if ! --> if(partit%mype == entry%root_rank) then
-
+#endif
        !___________________________________________________________________
        ! write double precision output
        if (entry%accuracy == i_real8) then
@@ -1273,17 +1282,22 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
           END DO ! --> DO J=1, size(entry%local_values_r4,dim=2)
!$OMP END PARALLEL DO
        end if ! --> if (entry%accuracy == i_real8) then
-       !___________________________________________________________________
        entry%addcounter = 0 ! clean_meanarrays
        entry%ctime_copy = ctime
-
+
+#if defined(__MULTIO)
+!       if (n==1) then
+           entry%rec_count = istep
+           call send_data_to_multio(entry)
+!       end if
+#else
        !___________________________________________________________________
        ! this is where the magic happens --> here do_output_callback is
        ! triggered as a method of the io_stream object --> call write_mean(...)
        call entry%thread%run()
        entry%thread_running = .true.
-
+#endif
     endif ! --> if (do_output) then
  end do ! --> do n=1, io_NSTREAMS
  lfirst=.false.
@@ -1561,8 +1575,16 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units,
    !___________________________________________________________________________
    if(entry%glsize(1)==mesh%nod2D .or. entry%glsize(2)==mesh%nod2D) then
       entry%is_elem_based = .false.
+      entry%shrinked_size=partit%myDim_nod2D
    else if(entry%glsize(1)==mesh%elem2D .or. entry%glsize(2)==mesh%elem2D) then
       entry%is_elem_based = .true.
+      entry%shrinked_size=partit%myDim_elem2D_shrinked
+      allocate(entry%shrinked_indx(entry%shrinked_size))
+      entry%shrinked_indx=partit%myInd_elem2D_shrinked
+!     write(*,*) partit%mype, partit%myDim_elem2D, partit%myDim_elem2D_shrinked, partit%myDim_elem2D-partit%myDim_elem2D_shrinked
+!     entry_index=0
+!     call MPI_AllREDUCE(partit%myDim_elem2D_shrinked, entry_index, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, err)
+!     write(*,*) 'total elem=', mesh%elem2D, entry_index
    else
       if(partit%mype == 0) print *,"can not determine if ",trim(name)," is node or elem based"
      stop
@@ -1699,4 +1721,47 @@ subroutine io_r2g(n, partit, mesh)
!$OMP END PARALLEL DO
    END IF
end subroutine
+
+#if defined(__MULTIO)
+SUBROUTINE send_data_to_multio(entry)
+    USE iom
+    USE multio_api
+
+    IMPLICIT NONE
+
+    TYPE(Meandata), TARGET, INTENT(INOUT) :: entry
+    TYPE(iom_field_request) :: request
+    REAL(real64), DIMENSION(SIZE(entry%shrinked_indx)), TARGET :: temp
+    INTEGER :: numLevels, globalSize, lev, i
+
+    numLevels = entry%glsize(1)
+    globalSize = entry%glsize(2)
+
+    request%name = trim(entry%name)
+    IF (.NOT. entry%is_elem_based) THEN
+        request%gridType = "ngrid"
+    ELSE
+        request%gridType = "egrid"
+    END IF
+    request%globalSize = globalSize
+    request%step = entry%rec_count
+
+    ! loop over vertical layers --> gather 3d variables layer-wise in 2d slices
+    DO lev=1, numLevels
+        request%level = lev
+
+        IF (.NOT. entry%is_elem_based) THEN
+            request%values => entry%local_values_r8_copy(lev, 1:entry%shrinked_size)
+        ELSE
+            DO i = 1, SIZE(entry%shrinked_indx)
+                temp(i) = entry%local_values_r8_copy(lev, entry%shrinked_indx(i))
+            END DO
+
+            request%values => temp
+        END IF
+
+        CALL iom_send_fesom_data(request)
+    END DO
+END SUBROUTINE
+#endif
 end module
diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90
index 653785313..1843e345b 100755
--- a/src/oce_mesh.F90
+++ b/src/oce_mesh.F90
@@ -842,28 +842,28 @@ SUBROUTINE read_mesh(partit, mesh)
     n=com_elem2D_full%sptr(com_elem2D_full%sPEnum+1)-1
     ALLOCATE(com_elem2D_full%slist(n))
     read(fileID,*) com_elem2D_full%slist
-
-!!$ read(fileID,*) com_edge2D%rPEnum
-!!$ ALLOCATE(com_edge2D%rPE(com_edge2D%rPEnum))
-!!$ read(fileID,*) com_edge2D%rPE
-!!$ ALLOCATE(com_edge2D%rptr(com_edge2D%rPEnum+1))
-!!$ read(fileID,*) com_edge2D%rptr
-!!$ ALLOCATE(com_edge2D%rlist(eDim_edge2D))
-!!$ read(fileID,*) com_edge2D%rlist
-!!$
-!!$ read(fileID,*) com_edge2D%sPEnum
-!!$ ALLOCATE(com_edge2D%sPE(com_edge2D%sPEnum))
-!!$ read(fileID,*) com_edge2D%sPE
-!!$ ALLOCATE(com_edge2D%sptr(com_edge2D%sPEnum+1))
-!!$ read(fileID,*) com_edge2D%sptr
-!!$ n=com_edge2D%sptr(com_edge2D%sPEnum+1)-1
-!!$ ALLOCATE(com_edge2D%slist(n))
-!!$ read(fileID,*) com_edge2D%slist
     close(fileID)
+
     if (mype==0) write(*,*) 'communication arrays are read'
     deallocate(rbuff, ibuff)
     deallocate(mapping)
-
+
+! necessary for MULTIO auxiliary data:
+! one element might belong to several processes; hence we unify the element partition
+! such that sum(myDim_elem2D_shrinked) over all processes gives elem2D
+    partit%myDim_elem2D_shrinked=0
+    DO n=1, myDim_elem2D
+       if (mesh%elem2D_nodes(1, n) > myDim_nod2D) cycle
+       partit%myDim_elem2D_shrinked=partit%myDim_elem2D_shrinked+1
+    END DO
+    allocate(partit%myInd_elem2D_shrinked(partit%myDim_elem2D_shrinked))
+! fill the respective indices
+    nn=1
+    DO n=1, myDim_elem2D
+       if (mesh%elem2D_nodes(1, n) > myDim_nod2D) cycle
+       partit%myInd_elem2D_shrinked(nn)=n
+       nn=nn+1
+    END DO
     ! no checksum for now, execute_command_line is failing too often. if you think it is important, please drop me a line and I will try to revive it: jan.hegewald@awi.de
     mesh%representative_checksum = ''
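
Note on the element "shrinking" used in read_mesh and iom_send_fesom_domains: halo elements exist on every rank that touches them, so concatenating per-rank element lists would overcount shared elements. The rule above assigns each element to exactly one rank, namely the one that owns the element's first node (local node index <= myDim_nod2D). The following standalone sketch (toy sizes and a hypothetical first_node array standing in for mesh%elem2D_nodes, not FESOM code) illustrates the two-pass count-then-fill pattern:

! Toy sketch (not FESOM code): the element ownership rule behind
! myDim_elem2D_shrinked / myInd_elem2D_shrinked. A rank keeps an element
! only if the element's first node is an owned node (index <= myDim_nod2D),
! so each shared element is counted exactly once across all ranks.
program shrink_rule_demo
    implicit none
    integer, parameter :: myDim_nod2D  = 4     ! toy: this rank owns local nodes 1..4
    integer, parameter :: myDim_elem2D = 5     ! toy: 5 local elements incl. halo
    integer, parameter :: first_node(myDim_elem2D) = [1, 3, 5, 2, 6] ! hypothetical
    integer :: n, nn, myDim_elem2D_shrinked
    integer, allocatable :: myInd_elem2D_shrinked(:)

    ! pass 1: count elements owned under the first-node rule
    myDim_elem2D_shrinked = 0
    do n = 1, myDim_elem2D
        if (first_node(n) > myDim_nod2D) cycle   ! first node is a halo node: skip
        myDim_elem2D_shrinked = myDim_elem2D_shrinked + 1
    end do

    ! pass 2: record the local indices of the owned elements
    allocate(myInd_elem2D_shrinked(myDim_elem2D_shrinked))
    nn = 1
    do n = 1, myDim_elem2D
        if (first_node(n) > myDim_nod2D) cycle
        myInd_elem2D_shrinked(nn) = n
        nn = nn + 1
    end do

    print *, 'owned elements:', myDim_elem2D_shrinked   ! 3
    print *, 'their indices :', myInd_elem2D_shrinked   ! 1 2 4
end program shrink_rule_demo

Every element has exactly one first node and every node has exactly one owning rank, so sum(myDim_elem2D_shrinked) over all ranks equals elem2D, which is what the "egrid" domain definition sent to multio relies on.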
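
For orientation, the sketch below shows how a caller could fill the iom_field_request type from iom.F90 for a single-level node-based field; in the PR itself, send_data_to_multio in io_meandata.F90 plays this role. This is a hypothetical usage example (the subroutine name and arguments are illustrative), assuming __MULTIO is defined and iom_initialize has already been called:

! Hypothetical caller sketch: hand one surface field to multio via the
! iom_field_request type defined in iom.F90 above. Assumes iom_initialize()
! ran and that sst holds local node values with the owned nodes first.
subroutine send_sst_example(sst, nod2D_global, myDim_nod2D, istep)
    use, intrinsic :: iso_fortran_env, only: real64
    use iom                                   ! iom_field_request, iom_send_fesom_data
    implicit none
    real(real64), target, intent(in) :: sst(:)          ! local values, halo included
    integer, intent(in) :: nod2D_global, myDim_nod2D, istep
    type(iom_field_request) :: request

    request%name       = 'sst'                ! field name as known to the multio plans
    request%gridType   = 'ngrid'              ! node-based fields use the "ngrid" domain
    request%globalSize = nod2D_global         ! mesh%nod2D on the real mesh
    request%level      = 1                    ! single-level (surface) field
    request%step       = istep
    request%values     => sst(1:myDim_nod2D)  ! owned nodes only, halo excluded

    call iom_send_fesom_data(request)
end subroutine send_sst_example

Element-based fields would instead set gridType = 'egrid' and compress their values through shrinked_indx first, exactly as send_data_to_multio does above.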