From 10f79ff304904bd17843193f0cfef8b8617ae77e Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 22 Nov 2022 13:08:59 +0000 Subject: [PATCH 01/25] Rebasing Cycle 3 updates on '469e74b1' --- configure_any.sh | 2 +- src/ifs_interface/ifs_interface.F90 | 794 ++++------------------------ src/ifs_interface/ifs_modules.F90 | 253 ++++++++- 3 files changed, 353 insertions(+), 696 deletions(-) diff --git a/configure_any.sh b/configure_any.sh index 7c63dae66..ceefd8d04 100755 --- a/configure_any.sh +++ b/configure_any.sh @@ -18,7 +18,7 @@ source env.sh # source this from your run script too if [[ ${LIB} = yes ]]; then mkdir build.lib || true # build dir for library cd build.lib - cmake -DBUILD_FESOM_AS_LIBRARY=ON .. # not required when re-compiling + cmake -DBUILD_FESOM_AS_LIBRARY=ON -DFESOM_INSTALL_PREFIX=/perm/ne1/fesom2/ -DBUILD_SHARED_LIBS=ON -DDISABLE_MULTITHREADING=ON -DENABLE_OPENMP=OFF .. # not required when re-compiling sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround for the moment on cray else mkdir build || true # build dir for binary diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index d9a6fc09a..5fbf061d2 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -61,6 +61,7 @@ SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, WRITE(0,*)'! FESOM is initialized from within IFS.' WRITE(0,*)'! get MPI_COMM_FESOM. =================' WRITE(0,*)'! main_initialize done. ===============' + WRITE(0,*)'Thomas/Kristian neu' endif ! Set more information for the caller @@ -132,7 +133,9 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & & lwritedist, & & lcommout, & & commoutprefix,& - & lparbcast + & lparbcast, & + & lparinterp2p, & + & lparintmultatm ! Global number of gaussian gridpoints INTEGER :: nglopoints @@ -176,6 +179,7 @@ SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & cdpathdist = './' lreaddist = .FALSE. lwritedist = .FALSE. + lparintmultatm = .TRUE. OPEN(9,file='namfesomcoup.in') READ(9,namfesomcoup) @@ -379,13 +383,19 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & INTEGER, INTENT(IN) :: nopoints ! Local variables - REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D) :: zsend - REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D) :: zsendU, zsendV + INTEGER , PARAMETER :: maxnfield = 6 + INTEGER , PARAMETER :: maxnfielduv = 2 + INTEGER :: nfield = 0 + INTEGER :: nfielduv = 0 + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D,maxnfield) :: zsendnf + REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D,maxnfielduv) :: zsendnfUV + REAL(wpIFS), DIMENSION(nopoints,maxnfield) :: zrecvnf + REAL(wpIFS), DIMENSION(nopoints,maxnfielduv) :: zrecvnfUV INTEGER :: elnodes(3) REAL(wpIFS) :: rlon, rlat ! Loop variables - INTEGER :: n, elem, ierr + INTEGER :: n, elem, ierr, jf !#include "associate_mesh.h" ! associate what is needed only @@ -405,74 +415,106 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & ice_alb => fesom%ice%atmcoupl%ice_alb(:) tmelt => fesom%ice%thermo%tmelt ! scalar const. + + nfield = 0 ! =================================================================== ! ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. - do n=1,myDim_nod2D - zsend(n)=fesom%tracers%data(1)%values(1, n) +tmelt ! sea surface temperature [K], + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=fesom%tracers%DATA(1)%values(1, n) +tmelt ! sea surface temperature [K], ! (1=surface, n=node, data(1/2)=T/S) - enddo - - ! 
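! --- Editor's example (hedged, not from the patch itself) -----------------
! With the two flags added to namfesomcoup above, a minimal namfesomcoup.in
! could look like this; only entries visible in this patch are shown, all
! other settings keep their defaults:
!
!    &namfesomcoup
!       lparinterp2p   = .TRUE.,   ! point-to-point MPI in parinter_fld*
!       lparintmultatm = .TRUE.,   ! pack fields and interpolate in one pass
!       lparbcast      = .FALSE.,
!       lreaddist      = .FALSE.,
!       lwritedist     = .FALSE.,
!       cdpathdist     = './'
!    /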
Interpolate SST - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgsst ) - + ENDDO ! =================================================================== ! - ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. - ! zsend(:)=a_ice(:) - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, a_ice, & - & nopoints, pgifr ) - - + ! Pack ice fraction data [0..1] + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=a_ice(n) + ENDDO + ! =================================================================== ! ! Pack ice temperature data (already in K) - zsend(:)=ice_temp - - ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgist ) - + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=ice_temp(n) + ENDDO ! =================================================================== ! ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. - zsend(:)=ice_alb - - ! Interpolate ice albedo - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pgalb ) - + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=ice_alb(n) + ENDDO ! =================================================================== ! ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. - zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=m_ice(n)/MAX(a_ice(n),0.01) ! ice thickness (mean over ice) + ENDDO + + ! =================================================================== ! + ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=m_snow(n)/MAX(a_ice(n),0.01) ! snow thickness (mean over ice) + ENDDO - ! Interpolation of average ice thickness - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pghic ) + ! =================================================================== ! + ! Interpolate all fields + IF (lparintmultatm) THEN + CALL parinter_fld_mult( nfield, mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsendnf, & + & nopoints, zrecvnf ) + ELSE + DO jf = 1, nfield + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsendnf(:,jf), & + & nopoints, zrecvnf(:,jf) ) + ENDDO + ENDIF + nfield = 0 + ! =================================================================== ! + ! Unpack 'pgsst' on Gauss. + ! zsend(:)=a_ice(:) + nfield = nfield + 1 + pgsst(:) = zrecvnf(:,nfield) + ! + ! =================================================================== ! + ! Unpack 'pgifr' on Gauss. + ! zsend(:)=a_ice(:) + nfield = nfield + 1 + pgifr(:) = zrecvnf(:,nfield) + ! + ! =================================================================== ! + ! Unpack ice temperature data (already in K) + nfield = nfield + 1 + pgist(:) = zrecvnf(:,nfield) ! =================================================================== ! - ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. - zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) + ! Unpack ice albedo data pgalb on Gaussian grid. + nfield = nfield + 1 + pgalb(:) = zrecvnf(:,nfield) - ! Interpolation of snow thickness - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & myDim_nod2D, zsend, & - & nopoints, pghsn ) + ! =================================================================== ! + ! 
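! --- Editor's note (hedged, not from the patch itself) --------------------
! The unpack sequence below must advance nfield in exactly the pack order
! used above (SST, ice fraction, ice temperature, albedo, ice thickness,
! snow thickness); a mismatch silently swaps fields on the Gaussian grid.
! A cheap guard, assuming pack and unpack counts are meant to stay equal,
! would be
!    IF ( nfield /= maxnfield ) CALL abort
! placed after the last unpack.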
Unpack ice thickness data pghic on Gaussian grid. + nfield = nfield + 1 + pghic(:) = zrecvnf(:,nfield) + ! =================================================================== ! + ! Unpack snow thickness data pghsn on Gaussian grid. + nfield = nfield + 1 + pghsn(:) = zrecvnf(:,nfield) ! =================================================================== ! ! Surface currents need to be rotated to geographical grid ! Pack u(v) surface currents - zsendU(:)=fesom%dynamics%UV(1,1,1:myDim_elem2D) - zsendV(:)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here - + zsendnfUV(:,1)=fesom%dynamics%UV(1,1,1:myDim_elem2D) + zsendnfUV(:,2)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here + nfielduv = 2 + do elem=1, myDim_elem2D ! compute element midpoints @@ -481,24 +523,30 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS ! Rotate vectors to geographical coordinates (r2g) - call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord + CALL vector_r2g(zsendnfUV(elem,1), zsendnfUV(elem,2), rlon, rlat, 0) ! 0-flag for rot. coord end do - + #ifdef FESOM_TODO ! We need to sort out the non-unique global index before we ! can couple currents ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendU, & - & nopoints, pgucur ) - - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_elem2D, zsendV, & - & nopoints, pgvcur ) - + IF (lparintmultatm) THEN + CALL parinter_fld_mult( nfielduv, mype, npes, icomm, UVtogauss, & + & myDim_nod2D, zsendnfUV, & + & nopoints, zrecvnfUV ) + ELSE + DO jf = 1, nfielduv + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_nod2D, zsendnfUV(:,jf), & + & nopoints, zrecvnfUV(:,jf) ) + ENDDO + ENDIF + pgucur(:) = zrecvnfUV(:,1) + pgvcur(:) = zrecvnfUV(:,2) + #else pgucur(:) = 0.0 @@ -837,636 +885,6 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !do_rotate_ice_wind=.false. !end if - -#ifdef FESOM_TODO - - ! Packed receive buffer - REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv - ! Unpacked fields on ORCA grids - REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice - REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol - REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff - REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr - ! Arrays for rotation - REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau - ! Lead fraction for both LIM2/LIM3 - REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld - ! Mask for masking for I grid - REAL(wpIFS) :: zmsksum - ! For summing up LIM3 contributions to ice temperature - REAL(wpIFS) :: zval,zweig - - ! Loop variables - INTEGER :: ji,jj,jk,jl - ! netCDF debugging output variables - CHARACTER(len=128) :: cdoutfile - INTEGER :: inum - REAL(wpIFS) :: zhook_handle ! Dr Hook handle - - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) - IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') - - ! 
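! --- Editor's note (hedged, not from the patch itself) --------------------
! Dr Hook instrumentation comes in pairs: the 0-call just above marks the
! routine entry and must be matched on every exit path by
!    IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle)
! as this (since removed) block indeed does at its end, mirroring the
! timing_start/timing_stop pair.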
Allocate the storage data - - IF (.NOT.lallociceflx) THEN - ALLOCATE( & - & zsqns_tot(jpi,jpj), & - & zsqns_ice(jpi,jpj), & - & zsqsr_tot(jpi,jpj), & - & zsqsr_ice(jpi,jpj), & - & zsemp_tot(jpi,jpj), & - & zsemp_ice(jpi,jpj), & - & zsevap_ice(jpi,jpj), & - & zsdqdns_ice(jpi,jpj), & - & zssprecip(jpi,jpj), & - & zstprecip(jpi,jpj), & - & zstcc(jpi,jpj), & - & zslcc(jpi,jpj), & - & zsatmist(jpi,jpj), & - & zsqns_ice_add(jpi,jpj)& - & ) - lallociceflx = .TRUE. - ENDIF - IF (.NOT.lallocstress) THEN - ALLOCATE( & - & zsutau(jpi,jpj), & - & zsvtau(jpi,jpj), & - & zsutau_ice(jpi,jpj), & - & zsvtau_ice(jpi,jpj) & - & ) - lallocstress = .TRUE. - ENDIF - - ! Sort out incoming arrays from the IFS and put them on the ocean grid - - !1. Interpolate ocean solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean solar radiation - - zqs___oce(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqs___oce(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !2. Interpolate ice solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice solar radiation - - zqs___ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqs___ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !3. Interpolate ocean non-solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean non-solar radiation - - zqns__oce(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqns__oce(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !4. Interpolate ice non-solar radiation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice non-solar radiation - - zqns__ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zqns__ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !5. Interpolate D(q)/dT to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack D(q)/D(T) - - zdqdt_ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zdqdt_ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !6. Interpolate total evaporation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack total evaporation - - zevap_tot(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zevap_tot(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !7. Interpolate evaporation over ice to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack evaporation over ice - - zevap_ice(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zevap_ice(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !8. 
Interpolate liquid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack liquid precipitation - - zprcp_liq(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zprcp_liq(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !9. Interpolate solid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack precipitation over ice - - zprcp_sol(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zprcp_sol(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !10. Interpolate runoff to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack runoff - - zrunoff(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zrunoff(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !11. Interpolate ocean runoff to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zocerunoff(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zocerunoff(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !12. Interpolate total cloud fractions to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zstcc(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zstcc(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - !13. Interpolate low cloud fractions to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ocean runoff - - zslcc(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zslcc(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! get sea ice fraction and lead fraction - -#if defined key_lim2 - zfrld(:,:) = frld(:,:) - zicefr(:,:) = 1 - zfrld(:,:) -#else - zicefr(:,:) = 0.0_wpIFS - DO jl = 1, jpl - zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) - ENDDO - zfrld(:,:) = 1 - zicefr(:,:) -#endif - - zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) - zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) - ! More consistent with NEMO, but does changes the results, so - ! we don't do it for now. - ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) - zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) - zssprecip(:,:) = - zsemp_ice(:,:) - zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) - zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) - zsevap_ice(:,:) = zevap_ice(:,:) - - ! non solar heat fluxes ! (qns) - IF (loceicemix) THEN - zsqns_tot(:,:) = zqns__oce(:,:) - ELSE - zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) - ENDIF - zsqns_ice(:,:) = zqns__ice(:,:) - ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. melting - - zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean - ! solar heat fluxes ! 
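! --- Editor's note (hedged, not from the patch itself) --------------------
! Every unpack loop in this block linearises the local (ji,jj) window as
!    jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 )
! i.e. row-major over ji = nldi..nlei, jj = nldj..nlej.  For example, with
! nldi = nldj = 2 and nlei = 4, point (ji=3,jj=2) maps to jk = 2 and
! (ji=2,jj=3) maps to jk = 4.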
(qsr) - - IF (loceicemix) THEN - zsqsr_tot(:,:) = zqs___oce(:,:) - ELSE - zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) - ENDIF - zsqsr_ice(:,:) = zqs___ice(:,:) - - IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle - zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) - zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) - ENDIF - - zsdqdns_ice(:,:) = zdqdt_ice(:,:) - - ! Apply lateral boundary condition - - CALL lbc_lnk(zsqns_tot, 'T', 1.0) - CALL lbc_lnk(zsqns_ice, 'T', 1.0) - CALL lbc_lnk(zsqsr_tot, 'T', 1.0) - CALL lbc_lnk(zsqsr_ice, 'T', 1.0) - CALL lbc_lnk(zsemp_tot, 'T', 1.0) - CALL lbc_lnk(zsemp_ice, 'T', 1.0) - CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) - CALL lbc_lnk(zssprecip, 'T', 1.0) - CALL lbc_lnk(zstprecip, 'T', 1.0) - CALL lbc_lnk(zstcc, 'T', 1.0) - CALL lbc_lnk(zslcc, 'T', 1.0) - - ! Interpolate atmospheric ice temperature to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack atmospheric ice temperature - - zsatmist(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zsatmist(ji,jj) = zrecv(jk) - ENDDO - ENDDO - CALL lbc_lnk(zsatmist, 'T', 1.0) - - zsqns_ice_add(:,:) = 0.0_wpIFS - - ! Use the dqns_ice filter - - IF (lqnsicefilt) THEN - - ! Add filtr to qns_ice - -#if defined key_lim2 - ztmp(:,:) = tn_ice(:,:,1) -#else - DO jj = nldj, nlej - DO ji = nldi, nlei - zval=0.0 - zweig=0.0 - DO jl = 1, jpl - zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) - zweig = zweig + a_i(ji,jj,jl) - ENDDO - IF ( zweig > 0.0 ) THEN - ztmp(ji,jj) = zval /zweig - ELSE - ztmp(ji,jj) = rt0 - ENDIF - ENDDO - ENDDO - CALL lbc_lnk(ztmp, 'T', 1.0) -#endif - - WHERE ( zicefr(:,:) > .001_wpIFS ) - zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) - END WHERE - - zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) - - ENDIF - - ! Interpolate u-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack u stress on U grid - - zuu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate v-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack v stress on U grid - - zvu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate u-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack u stress on V grid - - zuv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate v-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack v stress on V grid - - zvv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Rotate stresses from en to ij and put u,v stresses on U,V grids - - CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) - - ! 
Apply lateral boundary condition on u,v stresses on the U,V grids - - CALL lbc_lnk( zsutau, 'U', -1.0 ) - CALL lbc_lnk( zsvtau, 'V', -1.0 ) - - ! Interpolate ice u-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice u stress on U grid - - zuu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice v-stress to U grid - - CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice v stress on U grid - - zvu(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvu(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice u-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice u stress on V grid - - zuv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zuv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Interpolate ice v-stress to V grid - - CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) - - ! Unpack ice v stress on V grid - - zvv(:,:) = 0.0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) - zvv(ji,jj) = zrecv(jk) - ENDDO - ENDDO - - ! Rotate stresses from en to ij and put u,v stresses on U,V grids - - CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) - - ! Apply lateral boundary condition on u,v stresses on the U,V grids - - CALL lbc_lnk( zutau, 'U', -1.0 ) - CALL lbc_lnk( zvtau, 'V', -1.0 ) - -#if defined key_lim2_vp - - ! Convert to I grid for LIM2 for key_lim_vp - DO jj = 2, jpjm1 ! (U,V) ==> I - DO ji = 2, jpim1 ! NO vector opt. - zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) - zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & - & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) - IF ( zmsksum > 0.0 ) THEN - zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum - ENDIF - zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) - zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & - & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) - IF ( zmsksum > 0.0 ) THEN - zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum - ENDIF - END DO - END DO - -#else - - zsutau_ice(:,:) = zutau(:,:) - zsvtau_ice(:,:) = zvtau(:,:) - -#endif - - CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) - CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) - - ! Optionally write files write the data on the ORCA grid via IOM. 
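! --- Editor's sketch (hedged, not from the patch itself) ------------------
! The sixteen near-identical stanzas below could be collapsed into a small
! helper; the name 'debug_rstput' is an assumption, the iom calls are
! copied from the block itself:
!
!    SUBROUTINE debug_rstput( kt, cdname, pfld )
!       INTEGER,          INTENT(IN) :: kt            ! current time step
!       CHARACTER(len=*), INTENT(IN) :: cdname        ! field/file name stem
!       REAL(wpIFS),      INTENT(IN) :: pfld(jpi,jpj) ! field to dump
!       CHARACTER(len=128) :: cdoutfile
!       INTEGER :: inum
!       WRITE(cdoutfile,'(A,A,I8.8)') TRIM(cdname), '_', kt
!       CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib)
!       CALL iom_rstput( kt, kt, inum, cdname, pfld )
!       CALL iom_close( inum )
!    END SUBROUTINE debug_rstput
!
! after which each stanza shrinks to e.g. CALL debug_rstput( kt, 'zsutau', zsutau ).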
- - IF (ldebug) THEN - WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsemp_ice' , zsemp_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt - CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) - CALL iom_close( inum ) - WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt - CALL iom_open( 
TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) - CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) - CALL iom_close( inum ) - ENDIF - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) - -#else - - !FESOM part - !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' - !CALL par_ex - -#endif - END SUBROUTINE nemogcmcoup_lim2_update diff --git a/src/ifs_interface/ifs_modules.F90 b/src/ifs_interface/ifs_modules.F90 index 5e18ad10e..98a543373 100644 --- a/src/ifs_interface/ifs_modules.F90 +++ b/src/ifs_interface/ifs_modules.F90 @@ -1105,6 +1105,7 @@ MODULE parinter INTEGER :: nrecvtot INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp END TYPE parinterinfo + LOGICAL, PUBLIC :: lparinterp2p = .TRUE. CONTAINS @@ -1503,9 +1504,18 @@ SUBROUTINE parinter_fld( mype, nproc, mpi_comm, & ! Local variables ! MPI send/recv buffers +#if defined key_parinter_alloc + REAL(scripdp) , ALLOCATABLE :: zsend(:),zrecv(:) +#else REAL(scripdp) :: zsend(pinfo%nsendtot),zrecv(pinfo%nrecvtot) +#endif ! Misc variables - INTEGER :: i,istatus + INTEGER :: i,iproc,istatus,ierr,off,itag,irq,nreqs + INTEGER :: reqs(0:2*(nproc-1)) + +#if defined key_parinter_alloc + ALLOCATE(zsend(pinfo%nsendtot),zrecv(pinfo%nrecvtot)) +#endif ! Pack the sending buffer @@ -1517,13 +1527,57 @@ SUBROUTINE parinter_fld( mype, nproc, mpi_comm, & #if defined key_mpp_mpi - CALL mpi_alltoallv(& - & zsend,pinfo%nsend(0:nproc-1),& - & pinfo%nsdisp(0:nproc-1),mpi_double_precision, & - & zrecv,pinfo%nrecv(0:nproc-1), & - & pinfo%nrdisp(0:nproc-1),mpi_double_precision, & - & mpi_comm,istatus) + IF (lparinterp2p) THEN + + ! total num of reqs ( recv + send ) + nreqs = 2*(nproc-1) + ! post irecvs first + irq = 0 + DO iproc=0,nproc-1 + IF( pinfo%nrecv(iproc) > 0 .AND. iproc /= mype ) THEN + off = pinfo%nrdisp(iproc) + itag=100 + CALL mpi_irecv(zrecv(off+1),pinfo%nrecv(iproc),mpi_double_precision, & + & iproc,itag,mpi_comm,reqs(irq),ierr) + irq = irq + 1 + IF( irq > nreqs ) THEN + WRITE(0,*)'parinter_fld_mult: exceeded number of reqs when posting recvs',mype + CALL abort + ENDIF + ENDIF + ENDDO + ! post isends + DO iproc=0,nproc-1 + IF( pinfo%nsend(iproc) > 0 ) THEN + IF( iproc == mype ) THEN + zrecv( pinfo%nrdisp(iproc)+1:pinfo%nrdisp(iproc)+pinfo%nsend(iproc) ) = & + & zsend( pinfo%nsdisp(iproc)+1:pinfo%nsdisp(iproc)+pinfo%nsend(iproc) ) + ELSE + off = pinfo%nsdisp(iproc) + itag=100 + CALL mpi_isend(zsend(off+1),pinfo%nsend(iproc),mpi_double_precision, & + & iproc,itag,mpi_comm,reqs(irq),ierr) + irq = irq + 1 + IF( irq > nreqs ) THEN + WRITE(0,*)'parinter_fld_mult: exceeded number of reqs when posting sends',mype + CALL abort + ENDIF + ENDIF + ENDIF + ENDDO + ! wait on requests + CALL mpi_waitall(irq,reqs,MPI_STATUSES_IGNORE,ierr) + ELSE + + CALL mpi_alltoallv(& + & zsend,pinfo%nsend(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),mpi_double_precision, & + & zrecv,pinfo%nrecv(0:nproc-1), & + & pinfo%nrdisp(0:nproc-1),mpi_double_precision, & + & mpi_comm,istatus) + + ENDIF #else zrecv(:)=zsend(:) @@ -1538,8 +1592,189 @@ SUBROUTINE parinter_fld( mype, nproc, mpi_comm, & & pinfo%remap_matrix(1,i)*zrecv(pinfo%src_address(i)) END DO +#if defined key_parinter_alloc + DEALLOCATE(zsend,zrecv) +#endif + END SUBROUTINE parinter_fld + SUBROUTINE parinter_fld_mult( nfield, & + & mype, nproc, mpi_comm, & + & pinfo, nsrclocpoints, zsrc, ndstlocpoints, zdst ) + + ! Perform nfield interpolations from the zsrc fields + ! 
to zdst fields based on the information in pinfo + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm, nfield + ! Interpolation setup + TYPE(parinterinfo), INTENT(IN) :: pinfo + ! Source data/ + INTEGER, INTENT(IN) :: nsrclocpoints + REAL, INTENT(IN), DIMENSION(nsrclocpoints,nfield) :: zsrc + + ! Output arguments + + ! Destination data + INTEGER, INTENT(IN):: ndstlocpoints + REAL, DIMENSION(ndstlocpoints,nfield) :: zdst + + INTEGER :: nsend(0:nproc-1), nrecv(0:nproc-1),nrdisp(0:nproc-1), nsdisp(0:nproc-1) + + ! Local variables + + ! MPI send/recv buffers +#if defined key_parinter_alloc + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: zrecvnf + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: zsend, zrecv +#else + REAL(scripdp) :: zrecvnf(pinfo%nrecvtot,nfield) + REAL(scripdp) :: zsend(pinfo%nsendtot*nfield), & + & zrecv(pinfo%nrecvtot*nfield) +#endif + ! Misc variables + INTEGER :: i,istatus,ierr + INTEGER :: nf, ibases, ibaser, np, iproc, off, itag, irq, nreqs + INTEGER :: reqs(0:2*(nproc-1)) + INTEGER, DIMENSION(nfield,0:nproc-1) :: ibaseps, ibasepr + + ! Allocate temporary arrays on heap + +#if defined key_parinter_alloc + ALLOCATE(zrecvnf(pinfo%nrecvtot,nfield),& + & zsend(pinfo%nsendtot*nfield),zrecv(pinfo%nrecvtot*nfield)) +#endif + + ! Compute starts for packing + + ibases=0 + ibaser=0 + DO np=0,nproc-1 + DO nf=1,nfield + ibaseps(nf,np) = ibases + ibasepr(nf,np) = ibaser + ibases = ibases + pinfo%nsend(np) + ibaser = ibaser + pinfo%nrecv(np) + ENDDO + ENDDO + + ! Pack the sending buffer + + !$omp parallel default(shared) private(nf,np,i) + !$omp do schedule(dynamic) + DO np=0,nproc-1 + DO nf=1,nfield + DO i=1,pinfo%nsend(np) + zsend(i+ibaseps(nf,np))=& + & zsrc(pinfo%send_address(i+pinfo%nsdisp(np)),nf) + ENDDO + ENDDO + nsend(np)=pinfo%nsend(np)*nfield + nrecv(np)=pinfo%nrecv(np)*nfield + nrdisp(np)=pinfo%nrdisp(np)*nfield + nsdisp(np)=pinfo%nsdisp(np)*nfield + ENDDO + !$omp end do + !$omp end parallel + + ! Do the message passing + +#if defined key_mpp_mpi + + IF (lparinterp2p) THEN + + ! total num of reqs ( recv + send ) + nreqs = 2*(nproc-1) + ! post irecvs first + irq = 0 + DO iproc=0,nproc-1 + IF( nrecv(iproc) > 0 .AND. iproc /= mype ) THEN + off = nrdisp(iproc) + itag=100 + CALL mpi_irecv(zrecv(off+1),nrecv(iproc),mpi_double_precision, & + & iproc,itag,mpi_comm,reqs(irq),ierr) + irq = irq + 1 + IF( irq > nreqs ) THEN + WRITE(0,*)'parinter_fld_mult: exceeded number of reqs when posting recvs',mype + CALL abort + ENDIF + ENDIF + ENDDO + ! post isends + DO iproc=0,nproc-1 + IF( nsend(iproc) > 0 ) THEN + IF( iproc == mype ) THEN + zrecv( nrdisp(iproc)+1:nrdisp(iproc)+nsend(iproc) ) = & + & zsend( nsdisp(iproc)+1:nsdisp(iproc)+nsend(iproc) ) + ELSE + off = nsdisp(iproc) + itag=100 + CALL mpi_isend(zsend(off+1),nsend(iproc),mpi_double_precision, & + & iproc,itag,mpi_comm,reqs(irq),ierr) + irq = irq + 1 + IF( irq > nreqs ) THEN + WRITE(0,*)'parinter_fld_mult: exceeded number of reqs when posting sends',mype + CALL abort + ENDIF + ENDIF + ENDIF + ENDDO + ! wait on requests + CALL mpi_waitall(irq,reqs,MPI_STATUSES_IGNORE,ierr) + + ELSE + + IF(mype==0)WRITE(0,*)'lparinterp2p off' + CALL mpi_alltoallv(& + & zsend,nsend(0:nproc-1),& + & nsdisp(0:nproc-1),mpi_double_precision, & + & zrecv,nrecv(0:nproc-1), & + & nrdisp(0:nproc-1),mpi_double_precision, & + & mpi_comm,istatus) + + ENDIF +#else + + zrecv(:)=zsend(:) + +#endif + + ! 
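! --- Editor's note (hedged, not from the patch itself) --------------------
! Buffer layout assumed by the exchange above, illustrated for nproc = 2,
! nfield = 2 and pinfo%nsend = (/ 2, 1 /): all nfield copies of the points
! destined for one rank sit contiguously,
!    zsend = [ rank0: f1(p1) f1(p2) f2(p1) f2(p2) | rank1: f1(p1) f2(p1) ]
! so the scaled counts nsend(np) = pinfo%nsend(np)*nfield and displacements
! nsdisp(np) = pinfo%nsdisp(np)*nfield stay consistent with the per-field
! starts ibaseps/ibasepr from the "Compute starts for packing" loop.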
Unpack individual fields + + !$omp parallel default(shared) private(nf,np,i) + + !$omp do schedule (dynamic) + DO np=0,nproc-1 + DO nf=1,nfield + DO i=1,pinfo%nrecv(np) + zrecvnf(i+pinfo%nrdisp(np),nf)=zrecv(i+ibasepr(nf,np)) + ENDDO + ENDDO + ENDDO + !omp end do + + ! Do the interpolation + + !$omp do + DO nf=1,nfield + zdst(:,nf)=0.0 + DO i=1,pinfo%num_links + zdst(pinfo%dst_address(i),nf) = zdst(pinfo%dst_address(i),nf) + & + & pinfo%remap_matrix(1,i)*zrecvnf(pinfo%src_address(i),nf) + END DO + END DO + !$omp end do + + !$omp end parallel + +#if defined key_parinter_alloc + DEALLOCATE( zrecvnf, zsend, zrecv ) +#endif + + END SUBROUTINE parinter_fld_mult + SUBROUTINE parinter_write( mype, nproc, & & nsrcglopoints, ndstglopoints, & & pinfo, cdpath, cdprefix ) @@ -1854,4 +2089,8 @@ MODULE interinfo LOGICAL :: lparbcast = .FALSE. + ! Use multiple fields option + + LOGICAL :: lparintmultatm = .FALSE. + END MODULE interinfo From c3783c4679ef6cc6e93926551f5c14b881d9f1b2 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 22 Nov 2022 13:16:54 +0000 Subject: [PATCH 02/25] Clean up some FESOM_TODO blocks. --- src/ifs_interface/ifs_interface.F90 | 57 ----------------------------- 1 file changed, 57 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 5fbf061d2..72f344ac4 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -554,52 +554,6 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & #endif -#ifndef FESOM_TODO - - if(mype==0) then - WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' - endif - -#else - - ! Ice level temperatures - - IF (licelvls) THEN - -#if defined key_lim2 - - DO jl = 1, 3 - - ! Pack ice temperatures data at level jl(already in K) - - jk = 0 - DO jj = nldj, nlej - DO ji = nldi, nlei - jk = jk + 1 - zsend(jk) = tbif (ji,jj,jl) - ENDDO - ENDDO - - ! Interpolate ice temperature at level jl - - CALL parinter_fld( mype, npes, icomm, Ttogauss, & - & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & - & nopoints, pgistl(:,jl) ) - - ENDDO - -#else - WRITE(0,*)'licelvls needs to be sorted for LIM3' - CALL abort -#endif - - ENDIF - - IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') - IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) - -#endif - END SUBROUTINE nemogcmcoup_lim2_get @@ -918,17 +872,6 @@ SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' endif -#ifdef FESOM_TODO - iye = ndastp / 10000 - imo = ndastp / 100 - iye * 100 - ida = MOD( ndastp, 100 ) - CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) - zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS - CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) - icdate = iye * 10000 + imo * 100 + ida - ictime = ihh * 10000 + imm * 100 + iss -#endif - END SUBROUTINE nemogcmcoup_step From ff182bdf3e5ddfcc55169636eedfaee2282e54e1 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 22 Nov 2022 15:09:50 +0000 Subject: [PATCH 03/25] Added parinter_mult option to nemogcmcoup_lim2_update. 
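Pack the atmosphere->ocean fields of nemogcmcoup_lim2_update into one
buffer and interpolate them together.  As an illustration (hedged, using
the calling conventions from the diff below): instead of one exchange per
field,

    DO jf = 1, nfield
       CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, &
            &             zsendnf(:,jf), myDim_nod2D, &
            &             zrecvnf(:,jf) )
    ENDDO

a single combined exchange is performed when lparintmultatm is .TRUE.:

    CALL parinter_fld_mult( nfield, mype, npes, icomm, gausstoT, npoints, &
         &                  zsendnf, myDim_nod2D, &
         &                  zrecvnf )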
--- configure_any.sh | 2 +- src/ifs_interface/ifs_interface.F90 | 218 ++++++++++++++++++---------- 2 files changed, 145 insertions(+), 75 deletions(-) diff --git a/configure_any.sh b/configure_any.sh index ceefd8d04..838bf67af 100755 --- a/configure_any.sh +++ b/configure_any.sh @@ -18,7 +18,7 @@ source env.sh # source this from your run script too if [[ ${LIB} = yes ]]; then mkdir build.lib || true # build dir for library cd build.lib - cmake -DBUILD_FESOM_AS_LIBRARY=ON -DFESOM_INSTALL_PREFIX=/perm/ne1/fesom2/ -DBUILD_SHARED_LIBS=ON -DDISABLE_MULTITHREADING=ON -DENABLE_OPENMP=OFF .. # not required when re-compiling + cmake -DBUILD_FESOM_AS_LIBRARY=ON -DFESOM_INSTALL_PREFIX=/perm/ne1/fesom2/ -DBUILD_SHARED_LIBS=ON -DDISABLE_MULTITHREADING=ON -DENABLE_OPENMP=ON .. # not required when re-compiling sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround for the moment on cray else mkdir build || true # build dir for binary diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 72f344ac4..a6db9eb60 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -20,7 +20,7 @@ SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, USE g_config, only: dt USE g_clock, only: timenew, daynew, yearnew, month, day_in_month USE nemogcmcoup_steps, ONLY : substeps - + IMPLICIT NONE ! Input arguments @@ -61,7 +61,7 @@ SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, WRITE(0,*)'! FESOM is initialized from within IFS.' WRITE(0,*)'! get MPI_COMM_FESOM. =================' WRITE(0,*)'! main_initialize done. ===============' - WRITE(0,*)'Thomas/Kristian neu' + WRITE(0,*)'Thomas/Kristian parinter_mult version' endif ! Set more information for the caller @@ -611,15 +611,15 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & !type(t_mesh), target :: mesh ! Local variables - INTEGER :: n + INTEGER :: n, jf integer, pointer :: myDim_nod2D, eDim_nod2D REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater - - ! Packed receive buffer - REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D) :: zrecv - REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D):: zrecvU, zrecvV - + ! Packed send/receive buffers + INTEGER , PARAMETER :: maxnfield = 11 + INTEGER :: nfield = 0 + REAL(wpIFS), DIMENSION(npoints,maxnfield) :: zsendnf + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D,maxnfield) :: zrecvnf !#include "associate_mesh.h" ! associate only the necessary things @@ -657,15 +657,120 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & stress_atmoce_x=0. ! Done, taux_oce stress_atmoce_y=0. ! Done, tauy_oce + ! =================================================================== ! + ! Pack all arrays + nfield = 0 + !1. Ocean solar radiation to T grid + nfield = nfield + 1 + zsendnf(:,nfield) = qs___oce(:) + + ! =================================================================== ! + !2. Ice solar radiation to T grid + ! DO NOTHING + + ! =================================================================== ! + !3. Ocean non-solar radiation to T grid (is this non-solar heat flux?) + nfield = nfield + 1 + zsendnf(:,nfield) = qns__oce(:) + + ! =================================================================== ! + !4. Non-solar radiation over ice to T grid (is this non-solar heat flux?) + nfield = nfield + 1 + zsendnf(:,nfield) = qns__ice(:) + + ! =================================================================== ! + !5. D(q)/dT to T grid + ! DO NOTHING + + + ! 
=================================================================== ! + !6. Total evaporation to T grid + ! =================================================================== ! + !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + ! =================================================================== ! + nfield = nfield + 1 + zsendnf(:,nfield) = evap_tot(:) + + ! =================================================================== ! + !7. Sublimation (evaporation over ice) to T grid + nfield = nfield + 1 + zsendnf(:,nfield) = evap_ice(:) ! =================================================================== ! - !1. Interpolate ocean solar radiation to T grid + !8. Interpolate liquid precipitation to T grid + nfield = nfield + 1 + zsendnf(:,nfield) = prcp_liq(:) - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & - & myDim_nod2D, zrecv ) + ! =================================================================== ! + !9. Interpolate solid precipitation to T grid + nfield = nfield + 1 + zsendnf(:,nfield) = prcp_sol(:) - ! Unpack ocean solar radiation, without halo - shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + ! =================================================================== ! + !10. Interpolate runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack runoff, without halo + !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? + ! + ! Do the halo exchange + !call exchange_nod(runoff,fesom%partit) + ! + !11. Interpolate ocean runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack ocean runoff + ! ?? + + !12. Interpolate total cloud fractions to T grid (tcc) + ! + !13. Interpolate low cloud fractions to T grid (lcc) + + + ! =================================================================== ! + ! STRESSES + + ! OVER OCEAN: + nfield = nfield + 1 + zsendnf(:,nfield) = taux_oce(:) + + nfield = nfield + 1 + zsendnf(:,nfield) = tauy_oce(:) + + ! =================================================================== ! + ! OVER ICE: + nfield = nfield + 1 + zsendnf(:,nfield) = taux_ice(:) + + nfield = nfield + 1 + zsendnf(:,nfield) = tauy_ice(:) + + ! =================================================================== ! + ! Interpolate arrays + IF (lparintmultatm) THEN + CALL parinter_fld_mult( nfield, mype, npes, icomm, gausstoT, npoints, & + & zsendnf, myDim_nod2D, & + & zrecvnf ) + ELSE + DO jf = 1, nfield + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, & + & zsendnf(:,jf), myDim_nod2D, & + & zrecvnf(:,jf) ) + ENDDO + ENDIF + + ! =================================================================== ! + ! Unpack all arrays + nfield = 0 + ! =================================================================== ! + !1. Unpack ocean solar radiation, without halo + nfield = nfield + 1 + shortwave(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) ! Do the halo exchange call exchange_nod(shortwave,fesom%partit) @@ -677,26 +782,18 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! =================================================================== ! - !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & - & myDim_nod2D, zrecv ) - - ! 
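! --- Editor's note (hedged, not from the patch itself) --------------------
! parinter fills only the myDim_nod2D nodes owned by this task, so each
! unpack in this routine is followed by the same two-step pattern before a
! field is usable (the eDim_nod2D halo nodes come from the neighbours):
!    field(1:myDim_nod2D) = zrecvnf(1:myDim_nod2D,nfield)
!    call exchange_nod(field, fesom%partit)
! where 'field' stands for shortwave, oce_heat_flux, prec_rain, etc.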
Unpack ocean non-solar, without halo - oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + !3. Unpack ocean non-solar, without halo + nfield = nfield + 1 + oce_heat_flux(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) ! Do the halo exchange call exchange_nod(oce_heat_flux,fesom%partit) ! =================================================================== ! - !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & - & myDim_nod2D, zrecv ) - - ! Unpack ice non-solar - ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + !4. Unpack ice non-solar + nfield = nfield + 1 + ice_heat_flux(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) ! Do the halo exchange call exchange_nod(ice_heat_flux,fesom%partit) @@ -708,28 +805,21 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! =================================================================== ! - !6. Interpolate total evaporation to T grid + !6. Unpack total evaporation to T grid ! =================================================================== ! !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice ! =================================================================== ! - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & - & myDim_nod2D, zrecv ) - ! Unpack total evaporation, without halo - evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + nfield = nfield + 1 + evap_no_ifrac(1:myDim_nod2D)=-zrecvnf(1:myDim_nod2D,nfield)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign ! Do the halo exchange call exchange_nod(evap_no_ifrac,fesom%partit) - !7. Interpolate sublimation (evaporation over ice) to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & - & myDim_nod2D, zrecv ) - - ! Unpack sublimation (evaporation over ice), without halo - sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + !7. Unpack sublimation (evaporation over ice), without halo + nfield = nfield + 1 + sublimation(1:myDim_nod2D)=-zrecvnf(1:myDim_nod2D,nfield)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign ! Do the halo exchange call exchange_nod(sublimation,fesom%partit) @@ -738,26 +828,18 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! =================================================================== ! - !8. Interpolate liquid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & - & myDim_nod2D, zrecv ) - - ! Unpack liquid precipitation, without halo - prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + !8. Unpack liquid precipitation, without halo + nfield = nfield + 1 + prec_rain(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield)/rhofwt ! kg m^(-2) s^(-1) -> m/s ! Do the halo exchange call exchange_nod(prec_rain,fesom%partit) ! =================================================================== ! - !9. Interpolate solid precipitation to T grid - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & - & myDim_nod2D, zrecv ) - - ! Unpack solid precipitation, without halo - prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + !9. Unpack solid precipitation, without halo + nfield = nfield + 1 + prec_snow(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield)/rhofwt ! kg m^(-2) s^(-1) -> m/s ! 
Do the halo exchange call exchange_nod(prec_snow,fesom%partit) @@ -792,38 +874,26 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! STRESSES ! OVER OCEAN: - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & - & myDim_nod2D, zrecv ) - + nfield = nfield + 1 ! Unpack x stress atm->oce, without halo; then do halo exchange - stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + stress_atmoce_x(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) call exchange_nod(stress_atmoce_x,fesom%partit) - ! - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & - & myDim_nod2D, zrecv ) - ! Unpack y stress atm->oce, without halo; then do halo exchange - stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + nfield = nfield + 1 + stress_atmoce_y(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) call exchange_nod(stress_atmoce_y,fesom%partit) ! =================================================================== ! ! OVER ICE: - - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & - & myDim_nod2D, zrecv ) - ! Unpack x stress atm->ice, without halo; then do halo exchange - stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + nfield = nfield + 1 + stress_atmice_x(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) call exchange_nod(stress_atmice_x,fesom%partit) - ! - CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & - & myDim_nod2D, zrecv ) - ! Unpack y stress atm->ice, without halo; then do halo exchange - stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + nfield = nfield + 1 + stress_atmice_y(1:myDim_nod2D)=zrecvnf(1:myDim_nod2D,nfield) call exchange_nod(stress_atmice_y,fesom%partit) From 6096fabbda43ac047f1c20191d7a5620a9546f37 Mon Sep 17 00:00:00 2001 From: "Kristian S. Mogensen" Date: Tue, 22 Nov 2022 15:14:27 +0000 Subject: [PATCH 04/25] Reduce nemogcm_update_add standard error prints. --- src/ifs_interface/ifs_notused.F90 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index 7d8603248..754b20f60 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -211,6 +211,7 @@ SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & ! interpolation of the input gaussian grid data USE par_kind + USE fesom_main_storage_module, only: fesom => f ! only: MPI_COMM_FESOM, mype (previously in g_parsup) IMPLICIT NONE @@ -229,9 +230,10 @@ SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & ! Local variables + if(fesom%mype==0) then WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' 
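! (editor's note, hedged: the fesom%mype==0 guard added above keeps this
!  notice to a single line instead of one line per MPI task)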
!CALL abort - + endif END SUBROUTINE nemogcmcoup_update_add From 98f42b686797f23d9b1c4925f38cdceaa1e85a21 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Tue, 22 Nov 2022 16:30:02 +0000 Subject: [PATCH 05/25] update the intel compiler --- env/atosecmwf/shell | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env/atosecmwf/shell b/env/atosecmwf/shell index 3c5efc9c9..90f118607 100644 --- a/env/atosecmwf/shell +++ b/env/atosecmwf/shell @@ -21,7 +21,7 @@ module unload gcc # Load modules module load prgenv/intel -module load intel/2021.2.0 +module load intel/2021.4.0 module load hpcx-openmpi/2.9.0 module load intel-mkl/19.0.5 module load fftw/3.3.9 From f87bba05a4b669f3a607c1c387c107ff33789618 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Tue, 22 Nov 2022 16:34:04 +0000 Subject: [PATCH 06/25] update configure_any script --- configure_any.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure_any.sh b/configure_any.sh index 838bf67af..74cbbb15f 100755 --- a/configure_any.sh +++ b/configure_any.sh @@ -18,7 +18,7 @@ source env.sh # source this from your run script too if [[ ${LIB} = yes ]]; then mkdir build.lib || true # build dir for library cd build.lib - cmake -DBUILD_FESOM_AS_LIBRARY=ON -DFESOM_INSTALL_PREFIX=/perm/ne1/fesom2/ -DBUILD_SHARED_LIBS=ON -DDISABLE_MULTITHREADING=ON -DENABLE_OPENMP=ON .. # not required when re-compiling + cmake -DBUILD_FESOM_AS_LIBRARY=ON -DFESOM_INSTALL_PREFIX=/perm/${USER}/fesom2/ -DBUILD_SHARED_LIBS=ON -DDISABLE_MULTITHREADING=ON -DENABLE_OPENMP=ON .. # not required when re-compiling sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround for the moment on cray else mkdir build || true # build dir for binary From 70831781442b8cf2324e4e51596123735300de9e Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Wed, 23 Nov 2022 11:04:09 +0000 Subject: [PATCH 07/25] Allow extra ocean fields on IFS grid also for FESOM case; remove dummy nemogcmcoup_exflds_get subroutine in ifs_notused.F90 --- src/ifs_interface/ifs_interface.F90 | 137 ++++++++++++++++++++++++++++ src/ifs_interface/ifs_notused.F90 | 29 ------ 2 files changed, 137 insertions(+), 29 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index a6db9eb60..e7f2fc36f 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -557,6 +557,143 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & END SUBROUTINE nemogcmcoup_lim2_get +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate SSH, MLD, 20C isotherm, sea surface salinity, average T&S over upper 300m + ! from the FESOM grid to IFS's Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + USE scripremap + USE parinter + USE interinfo + USE fesom_main_storage_module, only: fesom => f + USE o_ARRAYS, only : MLD1 + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! 
Local variables + INTEGER , PARAMETER :: maxnfield = 6 + INTEGER :: nfield = 0 + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D,maxnfield) :: zsendnf + REAL(wpIFS), DIMENSION(nopoints,maxnfield) :: zrecvnf + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + integer, pointer :: myDim_nod2D, eDim_nod2D + + ! Loop variables + INTEGER :: n, elem, ierr, jf + + !#include "associate_mesh.h" + ! associate what is needed only + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D + + + nfield = 0 + ! =================================================================== ! + ! Pack SSH data 'pgssh' is on Gauss grid. + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=fesom%dynamics%eta_n(n) ! in meters + ENDDO + + ! =================================================================== ! + ! Pack MLD data + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=-MLD1(n) ! depth at which the density over depth differs + ! by 0.125 sigma units from the surface density (Griffies et al., 2009) + ENDDO + + ! =================================================================== ! + ! Pack depth of 20C isotherm + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=-1. ! compute later, set to -1 for the moment + ENDDO + + ! =================================================================== ! + ! Pack sea surface salinity data: 'pgsss' on Gaussian grid. + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=fesom%tracers%data(2)%values(1,n) ! in psu + ENDDO + + ! =================================================================== ! + ! Pack average temp over upper 300m: 'pgtem300' on Gaussian grid. + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=-1. ! compute later, set to -1 + ENDDO + + ! =================================================================== ! + ! Pack average salinity over upper 300m: 'pgsal300' on Gaussian grid. + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=-1. ! compute later, set to -1 + ENDDO + + ! =================================================================== ! + ! Interpolate all fields + IF (lparintmultatm) THEN + CALL parinter_fld_mult( nfield, mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsendnf, & + & nopoints, zrecvnf ) + ELSE + DO jf = 1, nfield + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsendnf(:,jf), & + & nopoints, zrecvnf(:,jf) ) + ENDDO + ENDIF + + nfield = 0 + ! =================================================================== ! + ! Unpack 'pgssh' on Gauss. + nfield = nfield + 1 + pgssh(:) = zrecvnf(:,nfield) + ! + ! =================================================================== ! + ! Unpack 'pgmld' on Gauss. + nfield = nfield + 1 + pgmld(:) = zrecvnf(:,nfield) + ! + ! =================================================================== ! + ! Unpack depth of 20C isotherm data + nfield = nfield + 1 + pg20d(:) = zrecvnf(:,nfield) + + ! =================================================================== ! + ! Unpack sea surface salinity pgsss on Gaussian grid. + nfield = nfield + 1 + pgsss(:) = zrecvnf(:,nfield) + + ! =================================================================== ! + ! Unpack average temp over upper 300m pgtem300 on Gaussian grid. + nfield = nfield + 1 + pgtem300(:) = zrecvnf(:,nfield) + + ! =================================================================== ! + ! Unpack average salinity over upper 300m on Gaussian grid. 
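! --- Editor's note (hedged, not from the patch itself) --------------------
! pg20d, pgtem300 and pgsal300 were packed above as the constant -1
! ("compute later"), so IFS currently receives -1 at every Gaussian point
! for these three diagnostics.  Filling them in later only touches the
! pack loops; the combined interpolation and the unpacking here already
! handle all six fields.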
+ nfield = nfield + 1 + pgsal300(:) = zrecvnf(:,nfield) + + +END SUBROUTINE nemogcmcoup_exflds_get + + SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & & npoints, & & taux_oce, tauy_oce, taux_ice, tauy_ice, & diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index 754b20f60..bc711a8c6 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -88,35 +88,6 @@ SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & END SUBROUTINE nemogcmcoup_get -SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & - & nopoints, pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 ) - - ! Interpolate sst, ice: surf T; albedo; concentration; thickness, - ! snow thickness and currents from the ORCA grid to the Gaussian grid. - - ! This routine can be called at any point in time since it does - ! the necessary message passing in parinter_fld. - - USE par_kind - IMPLICIT NONE - - ! Arguments - REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & - & pgtem300, pgsal300 - ! Message passing information - INTEGER, INTENT(IN) :: mype, npes, icomm - ! Number Gaussian grid points - INTEGER, INTENT(IN) :: nopoints - - ! Local variables - - WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' - CALL abort - -END SUBROUTINE nemogcmcoup_exflds_get - - SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) ! Interpolate sst, ice and currents from the ORCA grid From 74aa7a76e9afa612f65a1f1a2e126a511f932f73 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Fri, 25 Nov 2022 10:51:04 +0000 Subject: [PATCH 08/25] add coupling of surface currents --- src/ifs_interface/ifs_interface.F90 | 65 ++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 20 deletions(-) diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index e7f2fc36f..45722b905 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -383,7 +383,7 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & INTEGER, INTENT(IN) :: nopoints ! Local variables - INTEGER , PARAMETER :: maxnfield = 6 + INTEGER , PARAMETER :: maxnfield = 8 INTEGER , PARAMETER :: maxnfielduv = 2 INTEGER :: nfield = 0 INTEGER :: nfielduv = 0 @@ -460,6 +460,27 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & zsendnf(n,nfield)=m_snow(n)/MAX(a_ice(n),0.01) ! snow thickness (mean over ice) ENDDO + ! =================================================================== ! + ! Pack U surface currents; need to be rotated to geographical grid + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=fesom%dynamics%uvnode(1,1,n) ! (u/v,level,nod2D) + ENDDO + + ! =================================================================== ! + ! Pack V surface currents; need to be rotated to geographical grid + nfield = nfield + 1 + DO n=1,myDim_nod2D + zsendnf(n,nfield)=fesom%dynamics%uvnode(2,1,n) ! (u/v,level,nod2D) + ENDDO + + ! Rotate vectors (U,V) to geographical coordinates (r2g) + DO n=1,myDim_nod2D + rlon=coord_nod2D(1,n) + rlat=coord_nod2D(2,n) + CALL vector_r2g(zsendnf(n,nfield-1), zsendnf(n,nfield), rlon, rlat, 0) ! 0-flag for rot. coord + ENDDO + ! =================================================================== ! ! Interpolate all fields IF (lparintmultatm) THEN @@ -508,24 +529,28 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & pghsn(:) = zrecvnf(:,nfield) ! =================================================================== ! - ! Surface currents need to be rotated to geographical grid - - ! 
Pack u(v) surface currents - zsendnfUV(:,1)=fesom%dynamics%UV(1,1,1:myDim_elem2D) - zsendnfUV(:,2)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here - nfielduv = 2 - - do elem=1, myDim_elem2D - - ! compute element midpoints - elnodes=elem2D_nodes(:,elem) - rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS - rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS - - ! Rotate vectors to geographical coordinates (r2g) - CALL vector_r2g(zsendnfUV(elem,1), zsendnfUV(elem,2), rlon, rlat, 0) ! 0-flag for rot. coord + ! Unpack surface currents data pgucur/pgvcur on Gaussian grid. + nfield = nfield + 1 + pgucur(:) = zrecvnf(:,nfield) + nfield = nfield + 1 + pgvcur(:) = zrecvnf(:,nfield) - end do + ! Pack u(v) surface currents on elements + !zsendnfUV(:,1)=fesom%dynamics%UV(1,1,1:myDim_elem2D) + !zsendnfUV(:,2)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here + !nfielduv = 2 + ! + !do elem=1, myDim_elem2D + ! + ! ! compute element midpoints + ! elnodes=elem2D_nodes(:,elem) + ! rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS + ! rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS + ! + ! ! Rotate vectors to geographical coordinates (r2g) + ! CALL vector_r2g(zsendnfUV(elem,1), zsendnfUV(elem,2), rlon, rlat, 0) ! 0-flag for rot. coord + ! + !end do #ifdef FESOM_TODO @@ -549,8 +574,8 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & #else - pgucur(:) = 0.0 - pgvcur(:) = 0.0 + !pgucur(:) = 0.0 + !pgvcur(:) = 0.0 #endif From 234d21cbb759aa4f8f47ba3dd39c058ac4ba4d95 Mon Sep 17 00:00:00 2001 From: Thomas Rackow Date: Mon, 28 Nov 2022 13:48:53 +0000 Subject: [PATCH 09/25] include the extra diagnostics by Dima --- config/namelist.io | 1 + src/gen_modules_diag.F90 | 97 +++++++++++++++++++++++++++++++++++----- 2 files changed, 88 insertions(+), 10 deletions(-) diff --git a/config/namelist.io b/config/namelist.io index 0a3270c4a..9fd885479 100644 --- a/config/namelist.io +++ b/config/namelist.io @@ -7,6 +7,7 @@ ldiag_salt3D =.false. ldiag_dMOC =.false. ldiag_DVD =.false. ldiag_forc =.false. +ldiag_extflds =.true. 
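+! ldiag_extflds: extra fields requested for the IFS coupling (depth of the
+! 20C isotherm, T and S averaged over the upper 300 m); computed by
+! compute_extflds() in src/gen_modules_diag.F90 (see the hunks below)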
/ &nml_listsize diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index d9f924a0c..18ae3d9cb 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -16,14 +16,14 @@ module diagnostics implicit none private - public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, & - ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, ldiag_vorticity, & - compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & - u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, & - utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, & - v_surf, u_bott, v_bott, std_dens_min, std_dens_max, std_dens_N, std_dens, & - std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, & - dens_flux_e, vorticity, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & + public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, & + ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, ldiag_vorticity, ldiag_extflds, & + compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & + u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, & + utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, & + v_surf, u_bott, v_bott, std_dens_min, std_dens_max, std_dens_N, std_dens, & + std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, & + dens_flux_e, vorticity, zisotherm, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd ! Arrays used for diagnostics, some shall be accessible to the I/O ! 1. solver diagnostics: A*x=rhs? @@ -37,7 +37,8 @@ module diagnostics real(kind=WP), save, allocatable, target :: utau_surf(:), utau_bott(:) real(kind=WP), save, allocatable, target :: stress_bott(:,:), u_bott(:), v_bott(:), u_surf(:), v_surf(:) real(kind=WP), save, allocatable, target :: vorticity(:,:) - + real(kind=WP), save, allocatable, target :: zisotherm(:) !target temperature is specified as whichtemp in compute_extflds + real(kind=WP), save, allocatable, target :: tempzavg(:), saltzavg(:) !target depth for averaging is specified as whichdepth in compute_extflds ! defining a set of standard density bins which will be used for computing densMOC ! integer, parameter :: std_dens_N = 100 ! real(kind=WP), save, target :: std_dens(std_dens_N) @@ -74,9 +75,10 @@ module diagnostics logical :: ldiag_forc =.false. logical :: ldiag_vorticity =.false. + logical :: ldiag_extflds =.false. namelist /diag_list/ ldiag_solver, lcurt_stress_surf, ldiag_curl_vel3, ldiag_energy, & - ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc, ldiag_vorticity + ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc, ldiag_vorticity, ldiag_extflds contains @@ -780,6 +782,79 @@ subroutine relative_vorticity(mode, dynamics, partit, mesh) ! Now it the relative vorticity known on neighbors too end subroutine relative_vorticity +! +! +!_______________________________________________________________________________ +subroutine compute_extflds(mode, dynamics, tracers, partit, mesh) + IMPLICIT NONE + integer, intent(in) :: mode + logical, save :: firstcall=.true. + type(t_dyn) , intent(in), target :: dynamics + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP) :: zn, zint, tup, tlo + integer :: n, nz, nzmin, nzmax + real(kind=WP) :: whichtemp= 20.0_WP ! 
which isotherm to compute (set to 20 by default)
+  real(kind=WP)                         :: whichdepth=300.0_WP ! over which depth to average for tempzavg & saltzavg
+
+#include "associate_part_def.h"
+#include "associate_mesh_def.h"
+#include "associate_part_ass.h"
+#include "associate_mesh_ass.h"
+  if (firstcall) then !allocate the stuff at the first call
+     allocate(zisotherm(myDim_nod2D+eDim_nod2D))
+     allocate(tempzavg(myDim_nod2D+eDim_nod2D), saltzavg(myDim_nod2D+eDim_nod2D))
+     firstcall=.false.
+     if (mode==0) return
+  end if
+  temp => tracers%data(1)%values(:,:)
+  salt => tracers%data(2)%values(:,:)
+
+!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, zn, tup, tlo)
+  DO n=1, myDim_nod2D
+     zisotherm(n)=0.0_WP ! stays zero where the whichtemp isotherm is not found
+     nzmax=nlevels_nod2D(n)
+     nzmin=ulevels_nod2D(n)
+     zn  =0.0_WP
+     do nz=nzmin+1, nzmax-1
+        tup=temp(nz-1, n)
+        tlo=temp(nz,   n)
+        if (tup==tlo) cycle
+        if ((tup-whichtemp)*(tlo-whichtemp)<=0) then
+           zn=zn+0.5_WP*(hnode(nz-1, n)+(whichtemp-tup)*sum(hnode(nz-1:nz, n))/(tlo-tup))
+           zisotherm(n)=zn
+           exit
+        end if
+        zn=zn+hnode(nz-1, n)
+     end do
+!    if (tlo > whichtemp .AND. depth<=1.e-12) zisotherm=depth+hnode(nz, node) set the depth to the total depth if the isotherm is not found
+  END DO
+!$OMP END PARALLEL DO
+
+!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, zint)
+  DO n=1, myDim_nod2D
+     tempzavg(n) =0.0_WP
+     saltzavg(n) =0.0_WP
+     nzmax=nlevels_nod2D(n)
+     nzmin=ulevels_nod2D(n)
+     zint=0.0_WP
+     do nz=nzmin, nzmax-1
+        zint=zint+hnode(nz, n)
+        tempzavg(n)=tempzavg(n)+temp(nz, n)*hnode(nz, n)
+        saltzavg(n)=saltzavg(n)+salt(nz, n)*hnode(nz, n)
+        if (zint>=whichdepth) exit
+     end do
+     tempzavg(n)=tempzavg(n)/zint
+     saltzavg(n)=saltzavg(n)/zint
+  END DO
+!$OMP END PARALLEL DO
+
+  call exchange_nod(zisotherm, partit)
+  call exchange_nod(tempzavg,  partit)
+  call exchange_nod(saltzavg,  partit)
+end subroutine compute_extflds



@@ -813,6 +888,8 @@ subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh)
         ! compute relative vorticity
         if (ldiag_vorticity) call relative_vorticity(mode, dynamics, partit, mesh)
 
+        ! some extra exchanged fields requested by IFS/FESOM in NextGEMS.
+        if (ldiag_extflds) call compute_extflds(mode, dynamics, tracers, partit, mesh)
 end subroutine compute_diagnostics


From f26987c4d3f95e588676584c2e47a0eeb577d28b Mon Sep 17 00:00:00 2001
From: Patrick
Date: Mon, 5 Dec 2022 15:37:56 +0100
Subject: [PATCH 10/25] improve chainjob scriptfile with restarts from restart
 folder instead of file

---
 work/job_albedo_chain | 29 ++++++++++++++++-------------
 work/job_ollie_chain  | 31 +++++++++++++++++--------------
 2 files changed, 33 insertions(+), 27 deletions(-)

diff --git a/work/job_albedo_chain b/work/job_albedo_chain
index b7ca3b1fe..27b4f7d8a 100755
--- a/work/job_albedo_chain
+++ b/work/job_albedo_chain
@@ -244,24 +244,27 @@ if [ $is_newsimul -eq 1 ] ; then
     prev_chain_id=$(( $chain_id - 1 ))
 
     #_______________________________________________________________________
-    # copy restart ocean files from previous spinup cycle
-    prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.oce.restart.nc
-    if [ ! 
-f "$prev_rfile" ]; then
+    # copy restart ocean files/directories from previous spinup cycle
+    prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.oce.restart
+    if [ -d "${prev_rfile}" ]; then
+        cp -r $prev_rfile $dname_result_link/fesom.${aux_yr}.oce.restart
+    elif [ -f "${prev_rfile}.nc" ]; then
+        cp ${prev_rfile}.nc $dname_result_link/fesom.${aux_yr}.oce.restart.nc
+    else
         echo -e "\033[1;31m --> ERROR: could not find ocean restart file \033[0m"
         exit
-    else
-        echo -e "\033[33m --> create ocean warm start files \033[0m"
-        cp $prev_rfile $dname_result_link/fesom.${aux_yr}.oce.restart.nc
     fi
-    # copy restart ice files from previous spinup cycle
-    prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.ice.restart.nc
-    if [ ! -f "$prev_rfile" ]; then
+
+    # copy restart ice files/directories from previous spinup cycle
+    prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.ice.restart
+    if [ -d "${prev_rfile}" ]; then
+        cp -r $prev_rfile $dname_result_link/fesom.${aux_yr}.ice.restart
+    elif [ -f "${prev_rfile}.nc" ]; then
+        cp ${prev_rfile}.nc $dname_result_link/fesom.${aux_yr}.ice.restart.nc
+    else
         echo -e "\033[1;31m --> ERROR: could not find ice restart file \033[0m"
         exit
-    else
-        echo -e "\033[33m --> create ice warm start files \033[0m"
-        cp $prev_rfile $dname_result_link/fesom.${aux_yr}.ice.restart.nc
-    fi
+    fi 
 
     #_______________________________________________________________________
     # adapt year new in namelist.config otherwise fesom is not doing a
diff --git a/work/job_ollie_chain b/work/job_ollie_chain
index 4a32026ac..bd4a17dd7 100755
--- a/work/job_ollie_chain
+++ b/work/job_ollie_chain
@@ -234,29 +234,32 @@ if [ $is_newsimul -eq 1 ] ; then
    prev_chain_id=$(( $chain_id - 1 ))
 
    #_______________________________________________________________________
-   # copy restart ocean files from previous spinup cycle
-   prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.oce.restart.nc
-   if [ ! -f "$prev_rfile" ]; then
+   # copy restart ocean files/directories from previous spinup cycle
+   prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.oce.restart
+   if [ -d "${prev_rfile}" ]; then
+       cp -r $prev_rfile $dname_result_link/fesom.${aux_yr}.oce.restart
+   elif [ -f "${prev_rfile}.nc" ]; then
+       cp ${prev_rfile}.nc $dname_result_link/fesom.${aux_yr}.oce.restart.nc
+   else
       echo -e "\033[1;31m --> ERROR: could not find ocean restart file \033[0m"
       exit
-   else
-      echo -e "\033[33m --> create ocean warm start files \033[0m"
-      cp $prev_rfile $dname_result_link/fesom.${aux_yr}.oce.restart.nc
   fi
-   # copy restart ice files from previous spinup cycle
-   prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.ice.restart.nc
-   if [ ! 
-f "$prev_rfile" ]; then
+
+   # copy restart ice files/directories from previous spinup cycle
+   prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.ice.restart
+   if [ -d "${prev_rfile}" ]; then
+       cp -r $prev_rfile $dname_result_link/fesom.${aux_yr}.ice.restart
+   elif [ -f "${prev_rfile}.nc" ]; then
+       cp ${prev_rfile}.nc $dname_result_link/fesom.${aux_yr}.ice.restart.nc
+   else
       echo -e "\033[1;31m --> ERROR: could not find ice restart file \033[0m"
      exit
-   else
-      echo -e "\033[33m --> create ice warm start files \033[0m"
-      cp $prev_rfile $dname_result_link/fesom.${aux_yr}.ice.restart.nc
-   fi
+   fi 
 
    #_______________________________________________________________________
    # adapt year new in namelist.config otherwise fesom is not doing a
   # restart
-   aux=$(grep "yearnew=" namelist.config | cut -d "=" -f 2 | cut -d " " -f 1 )
+   aux=$(grep "yearnew=" namelist.config | cut -d "=" -f 2 | cut -d " " -f 1 ) 
   sed -i " s/yearnew=$aux/yearnew=$aux_yr/" namelist.config
 
    #_______________________________________________________________________

From 3f9c23e18b0268b518b2174242a6e834f848b6d4 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Mon, 5 Dec 2022 16:46:17 +0100
Subject: [PATCH 11/25] improve job_ini_albedo file

---
 work/job_ini_albedo | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/work/job_ini_albedo b/work/job_ini_albedo
index 49cad7a75..da6c9aeba 100755
--- a/work/job_ini_albedo
+++ b/work/job_ini_albedo
@@ -1,16 +1,19 @@
 #!/bin/bash
 #SBATCH --account=clidyn.p_fesom  # edit your account
 #SBATCH --partition=mpp
-#SBATCH --time=12:00:00
-#SBATCH --ntasks=128  # Number of tasks (MPI) tasks to be launched
+#SBATCH --time=00:30:00
+#SBATCH --ntasks=1  # Number of tasks (MPI) tasks to be launched
+#SBATCH --cpus-per-task 1
+#SBATCH --hint=nomultithread
+
 #SBATCH --job-name=fesom2.0_INI
 #SBATCH -o slurm-out.out
 #SBATCH -e slurm-err.out
 
-set -x
-
-ulimit -s unlimited
+#set -x
+#ulimit -s unlimited
+export OMP_NUM_THREADS=1
 
 source ../env/albedo/shell
 ln -s ../bin/fesom_ini.x .  # cp -n ../bin/fvom_ini.x
@@ -18,6 +21,10 @@ cp -n ../config/namelist.config .
 cp -n ../config/namelist.forcing .
 cp -n ../config/namelist.oce .
 cp -n ../config/namelist.ice .
+cp -n ../config/namelist.io .
+cp -n ../config/namelist.dyn .
+cp -n ../config/namelist.tra .
+cp -n ../config/namelist.cvmix .
 
 
 # determine JOBID

From 1349d07d926b027aa9eb36da5ccb5bd48b63fc94 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Mon, 5 Dec 2022 16:49:31 +0100
Subject: [PATCH 12/25] improve ../work/job_ini_albedo

---
 work/job_ini_albedo | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/work/job_ini_albedo b/work/job_ini_albedo
index da6c9aeba..09b6ab180 100755
--- a/work/job_ini_albedo
+++ b/work/job_ini_albedo
@@ -31,6 +31,6 @@ cp -n ../config/namelist.cvmix .
 
 JOBID=`echo $SLURM_JOB_ID |cut -d"." 
-f1` date -srun --mpi=pmi2 --ntasks=1 ./fesom_ini.x > "fvom_ini.out" +srun --mpi=pmi2 --ntasks=1 ./fesom_ini.x > "fesom2_ini.out" date From 69e112a226652364e167b4b5c52a285138ea791f Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 6 Dec 2022 10:20:08 +0100 Subject: [PATCH 13/25] update qoa criteria for albedo job submit scripts --- work/job_albedo | 3 ++- work/job_albedo_chain | 1 + work/job_ini_albedo | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/work/job_albedo b/work/job_albedo index 2a2780010..1565f49dd 100755 --- a/work/job_albedo +++ b/work/job_albedo @@ -1,7 +1,8 @@ #!/bin/bash #SBATCH --account=clidyn.p_fesom # edit your account #SBATCH --partition=mpp -#SBATCH --time=00:30:00 +#SBATCH --time=12:00:00 +#SBATCH --qos=12h # Slurm QOS; Default: 30min #SBATCH --ntasks=256 # Number of tasks (MPI) tasks to be launched #SBATCH --cpus-per-task 1 #SBATCH --hint=nomultithread diff --git a/work/job_albedo_chain b/work/job_albedo_chain index 27b4f7d8a..c3bd7a1d2 100755 --- a/work/job_albedo_chain +++ b/work/job_albedo_chain @@ -8,6 +8,7 @@ #SBATCH --hint=nomultithread #SBATCH --time=10:00:00 +#SBATCH --qos=12h #SBATCH --mail-type=END #SBATCH --mail-user=Patrick.Scholz@awi.de #SBATCH -o fesom2_%x_%j.out diff --git a/work/job_ini_albedo b/work/job_ini_albedo index 09b6ab180..1f7ea8eba 100755 --- a/work/job_ini_albedo +++ b/work/job_ini_albedo @@ -2,6 +2,7 @@ #SBATCH --account=clidyn.p_fesom # edit your account #SBATCH --partition=mpp #SBATCH --time=00:30:00 +#SBATCH --qos=30min #SBATCH --ntasks=1 # Number of tasks (MPI) tasks to be launched #SBATCH --cpus-per-task 1 #SBATCH --hint=nomultithread From 03cfebd52269af2c86f86c177884bdc169e4789f Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 6 Dec 2022 12:11:28 +0100 Subject: [PATCH 14/25] copy all necessary namelists --- work/job_albedo_chain | 4 ++-- work/job_ollie_chain | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/work/job_albedo_chain b/work/job_albedo_chain index c3bd7a1d2..a1643b82e 100755 --- a/work/job_albedo_chain +++ b/work/job_albedo_chain @@ -221,7 +221,7 @@ if [ $is_newsimul -eq 1 ] ; then #___BACKUP NAMELIST.* FILES INTO RESULT DIRECTORY_______________________ cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix ${dname_result}/. + namelist.cvmix namelist.dyn namelisst.tra ${dname_result}/. cp fesom.x ${dname_result}/. #___BACKUP SRC FILES INTO RESULT DIRECTORY______________________________ @@ -277,7 +277,7 @@ if [ $is_newsimul -eq 1 ] ; then # backup namelist.* & fesom.x in case they dont exist if [ ! -f "${dname_result}/namelist.config" ]; then cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.tra namelist.dyn namelist.cvmix ${dname_result}/. + namelist.cvmix namelist.tra namelist.dyn ${dname_result}/. fi if [ ! -f "${dname_result}/fesom.x" ]; then cp fesom.x ${dname_result}/. diff --git a/work/job_ollie_chain b/work/job_ollie_chain index bd4a17dd7..d4fd283c6 100755 --- a/work/job_ollie_chain +++ b/work/job_ollie_chain @@ -210,7 +210,7 @@ if [ $is_newsimul -eq 1 ] ; then #___BACKUP NAMELIST.* FILES INTO RESULT DIRECTORY_______________________ cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix ${dname_result}/. + namelist.cvmix namelist.tra namelist.dyn ${dname_result}/. cp fesom.x ${dname_result}/. 
#___BACKUP SRC FILES INTO RESULT DIRECTORY______________________________ @@ -266,7 +266,7 @@ if [ $is_newsimul -eq 1 ] ; then # backup namelist.* & fesom.x in case they dont exist if [ ! -f "${dname_result}/namelist.config" ]; then cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix ${dname_result}/. + namelist.cvmix namelist.tra namelist.dyn ${dname_result}/. fi if [ ! -f "${dname_result}/fesom.x" ]; then cp fesom.x ${dname_result}/. From 488567d17da8db2407492570f73a597f09bad629 Mon Sep 17 00:00:00 2001 From: dsidoren Date: Tue, 6 Dec 2022 12:56:25 +0100 Subject: [PATCH 15/25] Update namelist.io --- config/namelist.io | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/namelist.io b/config/namelist.io index 80a34f76a..0f308f928 100644 --- a/config/namelist.io +++ b/config/namelist.io @@ -7,7 +7,7 @@ ldiag_salt3D =.false. ldiag_dMOC =.false. ldiag_DVD =.false. ldiag_forc =.false. -ldiag_extflds =.true. +ldiag_extflds =.false. / &nml_general From 9dde5c1d2e98b514c37b6c52e7d05a97622bde05 Mon Sep 17 00:00:00 2001 From: Nikolay Koldunov Date: Fri, 9 Dec 2022 12:05:49 +0100 Subject: [PATCH 16/25] update levante environment --- env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.sh b/env.sh index 81b8b8d8b..950052e68 100755 --- a/env.sh +++ b/env.sh @@ -27,7 +27,7 @@ fi if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then STRATEGY="mistral.dkrz.de" -elif [[ $LOGINHOST =~ ^l[A-Za-z0-9]+\.atos\.local$ ]]; then +elif [[ $LOGINHOST =~ ^l[A-Za-z0-9]+\.lvt\.dkrz\.de$ ]]; then STRATEGY="levante.dkrz.de" elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then STRATEGY="ollie" From a053820ac17f3c3fd185a098a853fe11db947ca1 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 14 Dec 2022 16:06:46 +0100 Subject: [PATCH 17/25] optimize compiler flags and environment variables for albedo, to achive almost same runtime as on levante --- CMakeLists.txt | 1 + env/albedo/shell | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5155b05d2..7ec4985d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,6 +14,7 @@ set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. 
(Also needs F set(CRAY OFF CACHE BOOL "compile with cray ftn") set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") set(OPENMP_REPRODUCIBLE OFF CACHE BOOL "serialize OpenMP loops that are critical for reproducible results") + #set(VERBOSE OFF CACHE BOOL "toggle debug output") #add_subdirectory(oasis3-mct/lib/psmile) add_subdirectory(src) diff --git a/env/albedo/shell b/env/albedo/shell index 427d3ce0a..294bb7e54 100644 --- a/env/albedo/shell +++ b/env/albedo/shell @@ -1,9 +1,20 @@ # make the contents as shell agnostic as possible so we can include them with bash, zsh and others - module load intel-oneapi-compilers +export FC="mpiifort -qmkl" CC=mpiicc CXX=mpiicpc module load intel-oneapi-mkl/2022.1.0 module load intel-oneapi-mpi/2021.6.0 -export FC=mpiifort CC=mpiicc CXX=mpiicpc - module load netcdf-fortran/4.5.4-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 -module load netcdf-c/4.8.1-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 \ No newline at end of file +module load netcdf-c/4.8.1-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 + +# from DKRZ recommented environment variables on levante +# (https://docs.dkrz.de/doc/levante/running-jobs/runtime-settings.html) +export HCOLL_ENABLE_MCAST_ALL="0" +export HCOLL_MAIN_IB=mlx5_0:1 +export UCX_IB_ADDR_TYPE=ib_global +export UCX_NET_DEVICES=mlx5_0:1 +export UCX_TLS=mm,knem,cma,dc_mlx5,dc_x,self # this line here brings the most speedup factor ~1.5 +export UCX_UNIFIED_MODE=y +export UCX_HANDLE_ERRORS=bt +export HDF5_USE_FILE_LOCKING=FALSE +export I_MPI_PMI=pmi2 +export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so \ No newline at end of file From f751b64a349456b3d690b4ee6b1046bcfa28615c Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 14 Dec 2022 16:25:39 +0100 Subject: [PATCH 18/25] actualize albedo job scripts --- work/job_albedo | 29 ++++++++++++++++++----------- work/job_albedo_chain | 27 ++++++++++++++++----------- 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/work/job_albedo b/work/job_albedo index 1565f49dd..fb0d4c5bb 100755 --- a/work/job_albedo +++ b/work/job_albedo @@ -1,21 +1,23 @@ #!/bin/bash #SBATCH --account=clidyn.p_fesom # edit your account +#SBATCH --job-name=run #SBATCH --partition=mpp -#SBATCH --time=12:00:00 -#SBATCH --qos=12h # Slurm QOS; Default: 30min -#SBATCH --ntasks=256 # Number of tasks (MPI) tasks to be launched +#SBATCH --time=01:00:00 +#SBATCH --qos=12h +#SBATCH --nodes=48 # Number of tasks (MPI) tasks to be launched +#SBATCH --tasks-per-node 127 # using all 128CPus we exceed the maxload limit (>130) of many used nodes #SBATCH --cpus-per-task 1 -#SBATCH --hint=nomultithread -#SBATCH --job-name=fesom2 -#SBATCH -o slurm-out.out -#SBATCH -e slurm-err.out +#SBATCH -o fesom2_%x_%j.out +#SBATCH -e fesom2_%x_%j.out -#set -x -#ulimit -s unlimited +# disable hyperthreading +#SBATCH --hint=nomultithread -export OMP_NUM_THREADS=1 +module purge source ../env/albedo/shell +export OMP_NUM_THREADS=1 +ulimit -s unlimited # determine JOBID JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` @@ -30,8 +32,13 @@ cp -n ../config/namelist.ice . cp -n ../config/namelist.io . cp -n ../config/namelist.icepack . +#___DETERMINE SLURM JOBID+OUTPUTFILE____________________________________________ +jobid=$(echo $SLURM_JOB_ID | cut -d"." 
-f1) +fname="fesom2_${SLURM_JOB_NAME}_${jobid}.out" + +#___PUT JOB IN QUEUE____________________________________________________________ date -srun --mpi=pmi2 ./fesom.x > "fesom2.out" +srun --mpi=pmi2 ./fesom.x >> ${fname} date #qstat -f $PBS_JOBID diff --git a/work/job_albedo_chain b/work/job_albedo_chain index a1643b82e..61e5b52de 100755 --- a/work/job_albedo_chain +++ b/work/job_albedo_chain @@ -1,19 +1,24 @@ #!/bin/bash #___SET SLURM OPTIONS___________________________________________________________ -#SBATCH --account=clidyn.p_fesom +#SBATCH --account=clidyn.p_fesom # edit your account +#SBATCH --job-name=chain #SBATCH --partition=mpp -#SBATCH -J chain -#SBATCH --ntasks=256 -#SBATCH --cpus-per-task 1 + +# disable hyperthreading #SBATCH --hint=nomultithread +#SBATCH --nodes=48 # Number of tasks (MPI) tasks to be launched +#SBATCH --tasks-per-node 127 # using all 128CPus we exceed the maxload limit (>130) of many used nodes +#SBATCH --cpus-per-task 1 + +#SBATCH --time=12:00:00 +#SBATCH --qos=12h -#SBATCH --time=10:00:00 -#SBATCH --qos=12h -#SBATCH --mail-type=END -#SBATCH --mail-user=Patrick.Scholz@awi.de #SBATCH -o fesom2_%x_%j.out #SBATCH -e fesom2_%x_%j.out +##SBATCH --mail-type=END +##SBATCH --mail-user=Patrick.Scholz@awi.de + #___DEFAULT INPUT_______________________________________________________________ # how many job chains should be applied @@ -221,7 +226,7 @@ if [ $is_newsimul -eq 1 ] ; then #___BACKUP NAMELIST.* FILES INTO RESULT DIRECTORY_______________________ cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix namelist.dyn namelisst.tra ${dname_result}/. + namelist.cvmix ${dname_result}/. cp fesom.x ${dname_result}/. #___BACKUP SRC FILES INTO RESULT DIRECTORY______________________________ @@ -265,7 +270,7 @@ if [ $is_newsimul -eq 1 ] ; then else echo -e "\033[1;31m --> ERROR: could not find ice restart file \033[0m" exit - fi + fi #_______________________________________________________________________ # adapt year new in namelist.config otherwise fesom is not doing a @@ -277,7 +282,7 @@ if [ $is_newsimul -eq 1 ] ; then # backup namelist.* & fesom.x in case they dont exist if [ ! -f "${dname_result}/namelist.config" ]; then cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix namelist.tra namelist.dyn ${dname_result}/. + namelist.tra namelist.dyn namelist.cvmix ${dname_result}/. fi if [ ! -f "${dname_result}/fesom.x" ]; then cp fesom.x ${dname_result}/. 
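Note on the job geometry in the patch above: --nodes=48 with --tasks-per-node 127
launches 48 x 127 = 6096 MPI ranks and leaves one of the 128 cores per node idle,
since, as the script comment states, using all 128 CPUs exceeds the maxload limit
(>130) of many of the nodes in use.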
From 6a44ffe553ec88b331dc154816d3e667d83cf5e4 Mon Sep 17 00:00:00 2001 From: Jan Streffing Date: Tue, 20 Dec 2022 09:13:25 +0100 Subject: [PATCH 19/25] fix typo --- src/io_meandata.F90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index d71e293e9..d5d4e4a39 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -242,10 +242,10 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'evap', 'evaporation', 'm/s', evaporation(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('prec ') sel_forcvar(5) = 1 - call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'prec', 'precipitation rain', 'm/s', prec_rain(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('snow ') sel_forcvar(6) = 1 - call def_stream(nod2D, myDim_nod2D, 'snow', 'precicipation snow', 'm/s', prec_snow(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'snow', 'precipitation snow', 'm/s', prec_snow(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('tair ') sel_forcvar(3) = 1 call def_stream(nod2D, myDim_nod2D, 'tair', 'surface air temperature', '°C', Tair(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) From 383d5ed879f4fc2a1b6038e6d62728c22cf03644 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 21 Dec 2022 16:27:21 +0100 Subject: [PATCH 20/25] alternate environment and cmake settings to include Jan aleph workaround on albedo --- env/albedo/shell | 7 +++++-- src/CMakeLists.txt | 18 +++++++++++++++--- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/env/albedo/shell b/env/albedo/shell index 294bb7e54..d8d22a64e 100644 --- a/env/albedo/shell +++ b/env/albedo/shell @@ -1,8 +1,9 @@ # make the contents as shell agnostic as possible so we can include them with bash, zsh and others module load intel-oneapi-compilers export FC="mpiifort -qmkl" CC=mpiicc CXX=mpiicpc -module load intel-oneapi-mkl/2022.1.0 module load intel-oneapi-mpi/2021.6.0 + +module load intel-oneapi-mkl/2022.1.0 module load netcdf-fortran/4.5.4-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 module load netcdf-c/4.8.1-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 @@ -17,4 +18,6 @@ export UCX_UNIFIED_MODE=y export UCX_HANDLE_ERRORS=bt export HDF5_USE_FILE_LOCKING=FALSE export I_MPI_PMI=pmi2 -export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so \ No newline at end of file +export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so + +export ENABLE_ALBEDO_INTELMPI_WORKAROUNDS='' \ No newline at end of file diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 77d2f191a..14e030899 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -13,6 +13,12 @@ if(DEFINED ENV{ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS}) # be able to set the initial else() option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" OFF) endif() +if(DEFINED ENV{ENABLE_ALBEDO_INTELMPI_WORKAROUNDS}) # be able to set the initial cache value from our env settings for aleph, not only via cmake command + option(ALBEDO_INTELMPI_WORKAROUNDS "workaround for performance issues on albedo" ON) +else() + option(ALBEDO_INTELMPI_WORKAROUNDS "workaround for performance issues on albedo" OFF) +endif() + if(ALEPH_CRAYMPICH_WORKAROUNDS) # todo: enable these options only for our targets @@ 
-24,8 +30,11 @@ if(ALEPH_CRAYMPICH_WORKAROUNDS) #add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now add_compile_options(-DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS) endif() +if(ALBEDO_INTELMPI_WORKAROUNDS) + add_compile_options(-DENABLE_ALBEDO_INTELMPI_WORKAROUNDS) +endif() -option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) +option(DISABLE_MULTITHREADING "disable asynchronous operations" ON) option(ENABLE_OPENACC "compile with OpenACC support" OFF) set(NV_GPU_ARCH "cc80" CACHE STRING "GPU arch for nvfortran compiler (cc35,cc50,cc60,cc70,cc80,...)") @@ -129,13 +138,16 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) else() target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -ip -init=zero -no-wrap-margin) endif() -# target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) -# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +# target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de ) target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -mtune=core-avx2) + elseif(${FESOM_PLATFORM_STRATEGY} STREQUAL albedo) + target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -O3 -ip -fPIC -qopt-malloc-options=2 -qopt-prefetch=5 -unroll-aggressive) #NEC mpi option else() target_compile_options(${PROJECT_NAME} PRIVATE -xHost) endif() + target_compile_options(${PROJECT_NAME} PRIVATE -g -traceback ) #-check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) # target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) From a7ac73cb685f57c4bada7da2ed6d57d9a39af509 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 21 Dec 2022 16:41:38 +0100 Subject: [PATCH 21/25] clean up and comment some of Jans routine so i was able to debug them and implement Jans aleph workaround also for albedo --- src/info_module.F90 | 5 + src/io_fesom_file.F90 | 4 + src/io_meandata.F90 | 1502 +++++++++++++++++++++++------------------ 3 files changed, 840 insertions(+), 671 deletions(-) diff --git a/src/info_module.F90 b/src/info_module.F90 index ca240c743..49522d9c4 100644 --- a/src/info_module.F90 +++ b/src/info_module.F90 @@ -97,6 +97,11 @@ subroutine print_definitions() #else print '(g0)', 'ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS is OFF' #endif +#ifdef ENABLE_ALBEDO_INTELMPI_WORKAROUNDS + print '(g0)', 'ENABLE_ALBEDO_INTELMPI_WORKAROUNDS is ON' +#else + print '(g0)', 'ENABLE_ALBEDO_INTELMPI_WORKAROUNDS is 
OFF' +#endif #ifdef ENABLE_NVHPC_WORKAROUNDS print '(g0)', 'ENABLE_NVHPC_WORKAROUNDS is ON' #else diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 index f919a2859..39dd8d247 100644 --- a/src/io_fesom_file.F90 +++ b/src/io_fesom_file.F90 @@ -197,6 +197,8 @@ subroutine read_and_scatter_variables(this) #ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround call MPI_Barrier(this%comm, mpierr) +#elif ENABLE_ALBEDO_INTELMPI_WORKAROUNDS + call MPI_Barrier(this%comm, mpierr) #endif if(this%is_iorank()) then if(is_2d) then @@ -263,6 +265,8 @@ subroutine gather_and_write_variables(this) #ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS ! aleph cray-mpich workaround call MPI_Barrier(this%comm, mpierr) +#elif ENABLE_ALBEDO_INTELMPI_WORKAROUNDS + call MPI_Barrier(this%comm, mpierr) #endif ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index d71e293e9..924b4328d 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -56,9 +56,9 @@ module io_MEANDATA ! !-------------------------------------------------------------------------------------------- ! - type(Meandata), save, target :: io_stream(150) ! todo: find a way to increase the array withhout move_alloc to keep the derived types in Meandata intact - integer, save :: io_NSTREAMS=0 - real(kind=WP) :: ctime !current time in seconds from the beginning of the year + type(Meandata), save, target :: io_stream(150) ! todo: find a way to increase the array withhout move_alloc to keep the derived types in Meandata intact + integer, save :: io_NSTREAMS=0 + real(kind=WP) :: ctime !current time in seconds from the beginning of the year ! !-------------------------------------------------------------------------------------------- ! @@ -87,68 +87,82 @@ module io_MEANDATA !-------------------------------------------------------------------------------------------- ! - subroutine destructor(this) +! +! +!_______________________________________________________________________________ +! not sure why this is needed --> seems to become method of meandata stream object +! type Meandata +! private +! ... +! contains +! final destructor +! end type +subroutine destructor(this) type(Meandata), intent(inout) :: this ! EO args call assert_nf(nf_close(this%ncid), __LINE__) - end subroutine - - +end subroutine +! +! +!_______________________________________________________________________________ +! define 2d/3d meandata stream parameter subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) - use MOD_MESH - use MOD_TRACER - USE MOD_PARTIT - USE MOD_PARSUP - USE MOD_DYN - USE MOD_ICE - use g_cvmix_tke - use g_cvmix_idemix - use g_cvmix_kpp - use g_cvmix_tidal - use diagnostics - implicit none - integer :: i, j - integer, save :: nm_io_unit = 103 ! unit to open namelist file, skip 100-102 for cray - integer :: iost - integer,dimension(15) :: sel_forcvar=0 - character(len=10) :: id_string + use MOD_MESH + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE MOD_ICE + use g_cvmix_tke + use g_cvmix_idemix + use g_cvmix_kpp + use g_cvmix_tidal + use diagnostics + implicit none + integer :: i, j + integer, save :: nm_io_unit = 103 ! 
unit to open namelist file, skip 100-102 for cray + integer :: iost + integer,dimension(15) :: sel_forcvar=0 + character(len=10) :: id_string - type(t_mesh), intent(in) , target :: mesh - type(t_partit), intent(inout), target :: partit - type(t_tracer), intent(in) , target :: tracers - type(t_dyn) , intent(in) , target :: dynamics - type(t_ice) , intent(in) , target :: ice - namelist /nml_general / io_listsize, vec_autorotate - namelist /nml_list / io_list + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice + namelist /nml_general / io_listsize, vec_autorotate + namelist /nml_list / io_list #include "associate_part_def.h" #include "associate_mesh_def.h" #include "associate_part_ass.h" #include "associate_mesh_ass.h" - ! OPEN and read namelist for I/O - open( unit=nm_io_unit, file='namelist.io', form='formatted', access='sequential', status='old', iostat=iost ) - if (iost == 0) then - if (mype==0) WRITE(*,*) ' file : ', 'namelist.io',' open ok' - else - if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.io',' ; iostat=',iost - call par_ex(partit%MPI_COMM_FESOM, partit%mype) - stop - endif - READ(nm_io_unit, nml=nml_general, iostat=iost ) - allocate(io_list(io_listsize)) - READ(nm_io_unit, nml=nml_list, iostat=iost ) - close(nm_io_unit ) + !___________________________________________________________________________ + ! OPEN and read namelist for I/O + open( unit=nm_io_unit, file='namelist.io', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) WRITE(*,*) ' file : ', 'namelist.io',' open ok' + else + if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.io',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + endif + READ(nm_io_unit, nml=nml_general, iostat=iost ) + allocate(io_list(io_listsize)) + READ(nm_io_unit, nml=nml_list, iostat=iost ) + close(nm_io_unit ) + !___________________________________________________________________________ + do i=1, io_listsize + if (trim(io_list(i)%id)=='unknown ') then + if (mype==0) write(*,*) 'io_listsize will be changed from ', io_listsize, ' to ', i-1, '!' + io_listsize=i-1 + EXIT + end if + end do - do i=1, io_listsize - if (trim(io_list(i)%id)=='unknown ') then - if (mype==0) write(*,*) 'io_listsize will be changed from ', io_listsize, ' to ', i-1, '!' - io_listsize=i-1 - EXIT - end if - end do - +!_______________________________________________________________________________ DO i=1, io_listsize SELECT CASE (trim(io_list(i)%id)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!2D streams!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -166,7 +180,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) CASE ('ssh_rhs_old ') call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', dynamics%ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! 
output sea ice CASE ('uice ') if (use_ice) then @@ -201,7 +215,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', ice%data(3)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! output mixed layer depth CASE ('MLD1 ') call def_stream(nod2D, myDim_nod2D, 'MLD1', 'Mixed Layer Depth', 'm', MLD1(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) @@ -210,7 +224,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) CASE ('MLD3 ') call def_stream(nod2D, myDim_nod2D, 'MLD3', 'Mixed Layer Depth', 'm', MLD3(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! output surface forcing CASE ('fh ') call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux_in(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) @@ -265,7 +279,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) sel_forcvar(2) = 1 call def_stream(nod2D, myDim_nod2D, 'vwind', '10m merid. surface wind velocity','m/s', v_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! output KPP vertical mixing schemes CASE ('kpp_obldepth ') if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then! fesom KPP @@ -349,7 +363,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! output Ferrari/GM parameterisation CASE ('bolus_u ') if (Fer_GM) then @@ -376,7 +390,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream( nod2D , myDim_nod2D , 'fer_C', 'GM, depth independent speed', 'm/s' , fer_c(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! 
Density MOC diagnostics CASE ('dMOC ') if (ldiag_dMOC) then @@ -392,14 +406,14 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(elem2D, myDim_elem2D , 'density_flux_e', 'density flux at elems ', 'm', dens_flux_e(:), 1, 'y', i_real4, partit, mesh) end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! PGF (pressure gradient force) diagnostic CASE ('pgf_x ') call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_x', 'zonal pressure gradient force' , 'm/s^2', pgf_x(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('pgf_y ') call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_y', 'meridional pressure gradient force', 'm/s^2', pgf_y(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ #if defined (__oifs) CASE ('alb ') call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice%atmcoupl%ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) @@ -411,7 +425,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', ice%atmcoupl%oce_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) #endif -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! TKE mixing diagnostic CASE ('TKE ') if (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then @@ -431,7 +445,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) end if end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! IDEMIX mixing Internal-Wave-Energy diagnostics CASE ('IDEMIX ') if (mod(mix_scheme_nmb,10)==6) then @@ -448,7 +462,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(elem2D , myDim_elem2D , 'iwe_fsrf', 'IDEMIX surface forcing' , 'm^3/s^3', iwe_fsrf(:) , io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! TIDAL mixing diagnostics CASE ('TIDAL ') if (mod(mix_scheme_nmb,10)==7) then @@ -458,7 +472,7 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(elem2D , myDim_elem2D , 'tidal_fbot', 'near tidal bottom forcing' , 'W/m^2' , tidal_fbot , io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ ! 
TIDAL mixing diagnostics CASE ('FORC ') if (ldiag_forc) then @@ -481,74 +495,68 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:),io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) #endif end if -!___________________________________________________________________________________________________________________________________ +!_______________________________________________________________________________ CASE DEFAULT if (mype==0) write(*,*) 'stream ', io_list(i)%id, ' is not defined !' END SELECT ! --> SELECT CASE (trim(io_list(i)%id)) END DO ! --> DO i=1, io_listsize -!3D - if (ldiag_energy) then - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'rhof', 'in-situ density at faces', 'kg/m3', rhof(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wrhof', 'vertical velocity x density', 'kg/(s*m2)', wrhof(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uu', 'u times u', 'm2/s2', u_x_u(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uv', 'u times v', 'm2/s2', u_x_v(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vv', 'v times v', 'm2/s2', v_x_v(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'uw', 'u times w', 'm2/s2', u_x_w(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'vw', 'v times w', 'm2/s2', v_x_w(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudx', 'du/dx', '1/s', dudx(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudy', 'du/dy', '1/s', dudy(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdx', 'dv/dx', '1/s', dvdx(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdy', 'dv/dy', '1/s', dvdy(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dudz', 'du/dz', '1/s', dudz(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dvdz', 'dv/dz', '1/s', dvdz(:,:), 1, 'm', i_real8, partit, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz', 'int(Av * du/dz)', 'm3/s2', av_dudz(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) - - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', dynamics%w(:,:), 1, 'm', i_real8, partit, mesh) - - call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) - call 
def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
-        call def_stream(elem2D, myDim_elem2D, 'u_bott', 'bottom velocity', 'm/s', u_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
-        call def_stream(elem2D, myDim_elem2D, 'v_bott', 'bottom velocity', 'm/s', v_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
-        call def_stream(elem2D, myDim_elem2D, 'u_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
-        call def_stream(elem2D, myDim_elem2D, 'v_surf', 'surface velocity', 'm/s', v_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
-        call def_stream(elem2D, myDim_elem2D, 'tx_bot', 'bottom stress x', 'N/m2', stress_bott(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh)
-        call def_stream(elem2D, myDim_elem2D, 'ty_bot', 'bottom stress y', 'N/m2', stress_bott(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh)
-        if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind stress to ocean',     'm/s2', stress_surf(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(11)=1
-        if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind stress to ocean','m/s2', stress_surf(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(12)=1
-    end if
+    !___________________________________________________________________________
+    ! 3d energy diagnostic
+    if (ldiag_energy) then
+        call def_stream((/nl,   nod2D/),  (/nl,   myDim_nod2D/),  'rhof',  'in-situ density at faces',    'kg/m3',     rhof(:,:),  1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl,   nod2D/),  (/nl,   myDim_nod2D/),  'wrhof', 'vertical velocity x density', 'kg/(s*m2)', wrhof(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'uu', 'u times u', 'm2/s2', u_x_u(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'uv', 'u times v', 'm2/s2', u_x_v(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'vv', 'v times v', 'm2/s2', v_x_v(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl-1, myDim_elem2D/), 'uw', 'u times w', 'm2/s2', u_x_w(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl-1, myDim_elem2D/), 'vw', 'v times w', 'm2/s2', v_x_w(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'dudx', 'du/dx', '1/s', dudx(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'dudy', 'du/dy', '1/s', dudy(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'dvdx', 'dv/dx', '1/s', dvdx(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl-1, nod2D/),  (/nl-1, myDim_nod2D/),  'dvdy', 'dv/dy', '1/s', dvdy(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl,   myDim_elem2D/), 'dudz', 'du/dz', '1/s', dudz(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl,   myDim_elem2D/), 'dvdz', 'dv/dz', '1/s', dvdz(:,:), 1, 'm', i_real8, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl,   myDim_elem2D/), 'av_dudz',    'int(Av * du/dz)', 'm3/s2', av_dudz(:,:),    1, 'm', i_real4, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl,   myDim_elem2D/), 'av_dvdz',    'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:),    1, 'm', i_real4, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl,   myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2',  'm2/s3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh)
+        call def_stream((/nl,   elem2D/), (/nl,   myDim_elem2D/), 'Av',         'Vertical mixing A', 'm2/s', Av(:,:),        1, 'm', i_real4, partit, mesh)
+
+        call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'zonal velocity',      'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh)
+        call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh)
+        call def_stream((/nl,   nod2D/),  (/nl,   myDim_nod2D/),  'wm', 'vertical velocity',   'm/s', dynamics%w(:,:),    1, 'm', i_real8, partit, mesh)
+
+        call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom',  'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'u_bott', 'bottom velocity', 'm/s', u_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'v_bott', 'bottom velocity', 'm/s', v_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'u_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'v_surf', 'surface velocity', 'm/s', v_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'tx_bot', 'bottom stress x', 'N/m2', stress_bott(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh)
+        call def_stream(elem2D, myDim_elem2D, 'ty_bot', 'bottom stress y', 'N/m2', stress_bott(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh)
+        if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind stress to ocean',     'm/s2', stress_surf(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(11)=1
+        if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind stress to ocean','m/s2', stress_surf(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(12)=1
+    end if
-
-
-
-
-
-
-
-    !___________________________________________________________________________________________________________________________________
-    ! output Redi parameterisation
-    if (Redi) then
-        call def_stream((/nl-1 , nod2D /), (/nl-1, myDim_nod2D /), 'Redi_K', 'Redi diffusion coefficient', 'm2/s', Ki(:,:), 1, 'y', i_real4, partit, mesh)
-    end if
+    !___________________________________________________________________________
+    ! output Redi parameterisation
+    if (Redi) then
+        call def_stream((/nl-1 , nod2D /), (/nl-1, myDim_nod2D /), 'Redi_K', 'Redi diffusion coefficient', 'm2/s', Ki(:,:), 1, 'y', i_real4, partit, mesh)
+    end if
-    !___________________________________________________________________________________________________________________________________
-    ! output Monin-Obukov (TB04) mixing length
-    if (use_momix) then
-        call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, partit, mesh)
-    end if
+    !___________________________________________________________________________
+    ! output Monin-Obukhov (TB04) mixing length
+    if (use_momix) then
+        call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukhov mixing length', 'm', mixlength(:), 1, 'm', i_real4, partit, mesh)
+    end if
-    !___________________________________________________________________________________________________________________________________
+    !___________________________________________________________________________
     if (ldiag_curl_vel3) then
         call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'curl_u', 'relative vorticity', '1/s', vorticity, 1, 'm', i_real4, partit, mesh)
     end if
-    !___________________________________________________________________________________________________________________________________
+    !___________________________________________________________________________
     if (ice%whichEVP==1) then
     end if
@@ -588,613 +596,754 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh)
     end if
 end subroutine
 !
-!-------------------------------------------------------------------------------------------- !
+!_______________________________________________________________________________
 function mesh_dimname_from_dimsize(size, partit, mesh) result(name)
-  use mod_mesh
-  USE MOD_PARTIT
-  USE MOD_PARSUP
-  use diagnostics
+    use mod_mesh
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    use diagnostics
 #if defined (__icepack)
-  use icedrv_main, only: ncat ! number of ice thickness cathegories
+    use icedrv_main, only: ncat ! number of ice thickness categories
 #endif
-  implicit none
-  integer :: size
-  type(t_mesh)  , intent(in) :: mesh
-  type(t_partit), intent(in) :: partit
-  character(50) :: name
-
-  if (size==mesh%nod2D) then
-     name='nod2'
-  elseif (size==mesh%elem2D) then
-     name='elem'
-  elseif (size==mesh%nl) then
-     name='nz'
-  elseif (size==mesh%nl-1) then
-     name='nz1'
-  elseif (size==std_dens_N) then
-     name='ndens'
+    implicit none
+    integer :: size
+    type(t_mesh)  , intent(in) :: mesh
+    type(t_partit), intent(in) :: partit
+    character(50) :: name
+
+    if (size==mesh%nod2D) then
+        name='nod2'
+    elseif (size==mesh%elem2D) then
+        name='elem'
+    elseif (size==mesh%nl) then
+        name='nz'
+    elseif (size==mesh%nl-1) then
+        name='nz1'
+    elseif (size==std_dens_N) then
+        name='ndens'
 #if defined (__icepack)
-  elseif (size==ncat) then
-     name='ncat'
+    elseif (size==ncat) then
+        name='ncat'
 #endif
-  else
-     name='unknown'
-     if (partit%mype==0) write(*,*) 'WARNING: unknown dimension in mean I/O with size of ', size
-  end if
+    else
+        name='unknown'
+        if (partit%mype==0) write(*,*) 'WARNING: unknown dimension in mean I/O with size of ', size
+    end if
 end function
 !
-!-------------------------------------------------------------------------------------------- !
+!_______________________________________________________________________________
 subroutine create_new_file(entry, ice, dynamics, partit, mesh)
-  use g_clock
-  use mod_mesh
-  USE MOD_PARTIT
-  USE MOD_PARSUP
-  USE MOD_DYN
-  USE MOD_ICE
-  use fesom_version_info_module
-  use g_config
-  use o_PARAM
-
-  implicit none
-  character(2000)               :: att_text
-  type(t_mesh)  , intent(in)    :: mesh
-  type(t_partit), intent(in)    :: partit
-  type(t_dyn)   , intent(in)    :: dynamics
-  type(t_ice)   , intent(in)    :: ice
-
-  type(Meandata), intent(inout) :: entry
-  character(len=*), parameter   :: global_attributes_prefix = "FESOM_"
-  ! Serial output implemented so far
-  if (partit%mype/=entry%root_rank) return
-  ! create an ocean output file
-  write(*,*) 'initializing I/O file for ', trim(entry%name)
+    use g_clock
+    use mod_mesh
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    USE MOD_DYN
+    USE MOD_ICE
+    use fesom_version_info_module
+    use g_config
+    use o_PARAM
-  call assert_nf( nf_create(entry%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), entry%ncid), __LINE__)
+    implicit none
+    character(2000)               :: att_text
+    type(t_mesh)  , intent(in)    :: mesh
+    type(t_partit), intent(in)    :: partit
+    type(t_dyn)   , intent(in)    :: dynamics
+    type(t_ice)   , intent(in)    :: ice
+
+    type(Meandata), intent(inout) :: entry
+    character(len=*), parameter   :: global_attributes_prefix = "FESOM_"
+    ! Serial output implemented so far
+    if (partit%mype/=entry%root_rank) return
+    ! create an ocean output file
+    write(*,*) 'initializing I/O file for ', trim(entry%name)
+
+    !___________________________________________________________________________
+    ! Create file
+    call assert_nf( nf_create(entry%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), entry%ncid), __LINE__)
-!___Create mesh related dimensions__________________________________________
-  if (entry%ndim==1) then
-     call assert_nf( nf_def_dim(entry%ncid, entry%dimname(1), entry%glsize(2), entry%dimID(1)), __LINE__)
-  else if (entry%ndim==2) then
-     call assert_nf( nf_def_dim(entry%ncid, entry%dimname(1), entry%glsize(1), entry%dimID(1)), __LINE__)
-     call assert_nf( nf_def_var(entry%ncid, entry%dimname(1), NF_DOUBLE, 1, entry%dimID(1), entry%dimvarID(1)), __LINE__)
-     if (entry%dimname(1)=='nz') then
-        call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('depth at layer interface'),'depth at layer interface'), __LINE__)
-     elseif (entry%dimname(1)=='nz1') then
-        call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('depth at layer midpoint'),'depth at layer midpoint'), __LINE__)
-     elseif (entry%dimname(1)=='ncat') then
-        call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('sea-ice thickness class'),'sea-ice thickness class'), __LINE__)
-     else
-        if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data'
-     end if
-     call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'units', len_trim('m'),'m'), __LINE__)
-     call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'positive', len_trim('down'),'down'), __LINE__)
-     call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'axis', len_trim('Z'),'Z'), __LINE__)
-
-     call assert_nf( nf_def_dim(entry%ncid, entry%dimname(2), entry%glsize(2), entry%dimID(2)), __LINE__)
-  end if
-!___Create time related dimensions__________________________________________
-  call assert_nf( nf_def_dim(entry%ncid, 'time', NF_UNLIMITED, entry%recID), __LINE__)
-!___Define the time and iteration variables_________________________________
-  call assert_nf( nf_def_var(entry%ncid, 'time', NF_DOUBLE, 1, entry%recID, entry%tID), __LINE__)
-  att_text='time'
-  call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'long_name', len_trim(att_text), trim(att_text)), __LINE__)
-  call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'standard_name', len_trim(att_text), trim(att_text)), __LINE__)
-  write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0'
-  call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'units', len_trim(att_text), trim(att_text)), __LINE__)
-  call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__)
-  call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__)
+    !___________________________________________________________________________
+    ! Create mesh related dimensions
+    if (entry%ndim==1) then
+        call assert_nf( nf_def_dim(entry%ncid, entry%dimname(1), entry%glsize(2), entry%dimID(1)), __LINE__)
+
+    else if (entry%ndim==2) then
+        call assert_nf( nf_def_dim(entry%ncid, entry%dimname(1), entry%glsize(1), entry%dimID(1)), __LINE__)
+        call assert_nf( nf_def_var(entry%ncid, entry%dimname(1), NF_DOUBLE, 1, entry%dimID(1), entry%dimvarID(1)), __LINE__)
+        if (entry%dimname(1)=='nz') then
+            call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('depth at layer interface'),'depth at layer interface'), __LINE__)
+        elseif (entry%dimname(1)=='nz1') then
+            call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('depth at layer midpoint'),'depth at layer midpoint'), __LINE__)
+        elseif (entry%dimname(1)=='ncat') then
+            call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('sea-ice thickness class'),'sea-ice thickness class'), __LINE__)
+        else
+            if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data'
+        end if
+        call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'units', len_trim('m'),'m'), __LINE__)
+        call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'positive', len_trim('down'),'down'), __LINE__)
+        call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'axis', len_trim('Z'),'Z'), __LINE__)
+
+        call assert_nf( nf_def_dim(entry%ncid, entry%dimname(2), entry%glsize(2), entry%dimID(2)), __LINE__)
+    end if
+
+    !___________________________________________________________________________
+    ! Create time related dimensions
+    call assert_nf( nf_def_dim(entry%ncid, 'time', NF_UNLIMITED, entry%recID), __LINE__)
+
+    !___________________________________________________________________________
+    ! Define the time and iteration variables
+    call assert_nf( nf_def_var(entry%ncid, 'time', NF_DOUBLE, 1, entry%recID, entry%tID), __LINE__)
+    att_text='time'
+    call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'long_name', len_trim(att_text), trim(att_text)), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'standard_name', len_trim(att_text), trim(att_text)), __LINE__)
+    write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0'
+    call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'units', len_trim(att_text), trim(att_text)), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__)
+
+    call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__)
-  call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__)
-
-  call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__)
-  call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__)
-  call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__)
-
-!___Global attributes________
+    !___Global attributes_______________________________________________________
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'model', len_trim('FESOM2'),'FESOM2'), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'website', len_trim('fesom.de'), trim('fesom.de')), __LINE__)
+
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'git_SHA', len_trim(fesom_git_sha()), fesom_git_sha()), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'MeshPath', len_trim(MeshPath), trim(MeshPath)), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'mesh_representative_checksum', len(mesh%representative_checksum), mesh%representative_checksum), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'ClimateDataPath', len_trim(ClimateDataPath), trim(ClimateDataPath)), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'which_ALE', len_trim(which_ALE), trim(which_ALE)), __LINE__)
+    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'mix_scheme', len_trim(mix_scheme), trim(mix_scheme)), __LINE__)
 !    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_hor', len_trim(tra_adv_hor), trim(tra_adv_hor)), __LINE__)
 !    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_ver', len_trim(tra_adv_ver), trim(tra_adv_ver)), __LINE__)
 !    call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell',  NF_INT, 1, use_partial_cell), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation',    NF_INT, 1, force_rotation), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice',      NF_INT, 1, use_floatice), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP'        ,  NF_INT, 1, ice%whichEVP), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' ,  NF_INT, 1, ice%evp_rheol_steps), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc'        ,  NF_INT, 1, dynamics%opt_visc), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit'      ,  NF_INT, 1, dynamics%use_wsplit), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell',  NF_INT, 1, use_partial_cell), __LINE__)
+    call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'autorotate_back_to_geo', NF_INT, 1, vec_autorotate), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell',  NF_INT, 1, use_partial_cell), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation',    NF_INT, 1, force_rotation), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice',      NF_INT, 1, use_floatice), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP'        ,  NF_INT, 1, ice%whichEVP), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' ,  NF_INT, 1, ice%evp_rheol_steps), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc'        ,  NF_INT, 1, dynamics%opt_visc), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit'      ,  NF_INT, 1, dynamics%use_wsplit), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell',  NF_INT, 1, use_partial_cell), __LINE__)
-  call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'autorotate_back_to_geo', NF_INT, 1, vec_autorotate), __LINE__)
-
-
-
-!___This ends definition part of the file, below filling in variables is possible
-  call assert_nf( nf_enddef(entry%ncid), __LINE__)
-  if (entry%dimname(1)=='nz') then
-     call assert_nf( nf_put_var_double(entry%ncid, entry%dimvarID(1), abs(mesh%zbar)), __LINE__)
-  elseif (entry%dimname(1)=='nz1') then
-     call assert_nf( nf_put_var_double(entry%ncid, entry%dimvarID(1), abs(mesh%Z)), __LINE__)
-  else
-     if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data'
-  end if
-
-  call assert_nf( nf_close(entry%ncid), __LINE__)
+    !___________________________________________________________________________
+    ! This ends the definition part of the file; below it, filling in variables is possible
+    call assert_nf( nf_enddef(entry%ncid), __LINE__)
+    if (entry%dimname(1)=='nz') then
+        call assert_nf( nf_put_var_double(entry%ncid, entry%dimvarID(1), abs(mesh%zbar)), __LINE__)
+    elseif (entry%dimname(1)=='nz1') then
+        call assert_nf( nf_put_var_double(entry%ncid, entry%dimvarID(1), abs(mesh%Z)), __LINE__)
+    else
+        if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data'
+    end if
+
+    !___________________________________________________________________________
+    call assert_nf( nf_close(entry%ncid), __LINE__)
 end subroutine
 !
-!-------------------------------------------------------------------------------------------- !
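[Editorial note] create_new_file above follows the classic netCDF create/define/enddef/close sequence. A minimal, self-contained sketch of that same sequence, assuming only the F77 interface from netcdf.inc; the subroutine name, file name, dimension sizes, and variable names here are placeholders, not FESOM code:

    ! Editorial sketch (hypothetical names; not part of the patch).
    subroutine sketch_create_minimal(fname, n2d)
        implicit none
        include "netcdf.inc"
        character(len=*), intent(in) :: fname
        integer,          intent(in) :: n2d
        integer :: ncid, dim_n2d, dim_time, var_time, var_fld
        ! NF_NOCLOBBER refuses to overwrite an existing file; NF_CLASSIC_MODEL
        ! keeps the netCDF-4 file restricted to the classic data model, as above.
        if (nf_create(fname, IOR(NF_NOCLOBBER, IOR(NF_NETCDF4, NF_CLASSIC_MODEL)), ncid) /= NF_NOERR) stop 1
        ! one fixed horizontal dimension plus one unlimited record dimension
        if (nf_def_dim(ncid, 'nod2', n2d,          dim_n2d)  /= NF_NOERR) stop 1
        if (nf_def_dim(ncid, 'time', NF_UNLIMITED, dim_time) /= NF_NOERR) stop 1
        if (nf_def_var(ncid, 'time', NF_DOUBLE, 1, dim_time, var_time) /= NF_NOERR) stop 1
        if (nf_def_var(ncid, 'fld',  NF_REAL,   2, (/dim_n2d, dim_time/), var_fld) /= NF_NOERR) stop 1
        ! leaving define mode is required before any data can be written
        if (nf_enddef(ncid) /= NF_NOERR) stop 1
        if (nf_close(ncid)  /= NF_NOERR) stop 1
    end subroutine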
+!_______________________________________________________________________________
 subroutine assoc_ids(entry)
-  implicit none
-
-  type(Meandata), intent(inout) :: entry
-  integer                       :: j
+    implicit none
+    type(Meandata), intent(inout) :: entry
+    integer                       :: j
-  write(*,*) 'associating mean I/O file ', trim(entry%filename)
+    write(*,*) 'associating mean I/O file ', trim(entry%filename)
-  do j=1, entry%ndim
-     call assert_nf( nf_inq_dimid(entry%ncid, entry%dimname(j), entry%dimID(j)), __LINE__)
-  end do
-!___Associate time related dimensions_______________________________________
-  call assert_nf( nf_inq_dimid (entry%ncid, 'time', entry%recID), __LINE__)
-  call assert_nf( nf_inq_dimlen(entry%ncid, entry%recID, entry%rec_count), __LINE__)
-!___Associate the time and iteration variables______________________________
-  call assert_nf( nf_inq_varid(entry%ncid, 'time', entry%tID), __LINE__)
-!___Associate physical variables____________________________________________
-  call assert_nf( nf_inq_varid(entry%ncid, entry%name, entry%varID), __LINE__)
+    do j=1, entry%ndim
+        call assert_nf( nf_inq_dimid(entry%ncid, entry%dimname(j), entry%dimID(j)), __LINE__)
+    end do
+    !___Associate time related dimensions_______________________________________
+    call assert_nf( nf_inq_dimid (entry%ncid, 'time', entry%recID), __LINE__)
+    call assert_nf( nf_inq_dimlen(entry%ncid, entry%recID, entry%rec_count), __LINE__)
+    !___Associate the time and iteration variables______________________________
+    call assert_nf( nf_inq_varid(entry%ncid, 'time', entry%tID), __LINE__)
+    !___Associate physical variables____________________________________________
+    call assert_nf( nf_inq_varid(entry%ncid, entry%name, entry%varID), __LINE__)
 end subroutine
 !
-!-------------------------------------------------------------------------------------------- !
+!_______________________________________________________________________________
+! collect local mean output data (entry%local_values_r8_copy) into global 2d
+! array (entry%aux_r8) and use the root_rank CPU/Task to write them into netcdf
+! file. In case of 3d data, write horizontal slices level-wise.
 subroutine write_mean(entry, entry_index)
     use mod_mesh
     use io_gather_module
     implicit none
     type(Meandata), intent(inout) :: entry
     integer, intent(in)           :: entry_index
     integer tag
     integer :: i, size1, size2, size_gen, size_lev, order
     integer :: c, lev
+    real(kind=8)  :: t0,t1
     integer mpierr
 
-    ! Serial output implemented so far
-    if (entry%p_partit%mype==entry%root_rank) then
-        write(*,*) 'writing mean record for ', trim(entry%name), '; rec. count = ', entry%rec_count
-        call assert_nf( nf_put_vara_double(entry%ncid, entry%Tid, entry%rec_count, 1, entry%ctime_copy, 1), __LINE__)
-    end if
-! !_______writing 2D and 3D fields________________________________________________
-    size1=entry%glsize(1)
-    size2=entry%glsize(2)
-    tag = 2 ! we can use a fixed tag here as we have an individual communicator for each output field
-!___________writing 8 byte real_________________________________________
-    if (entry%accuracy == i_real8) then
-        if(entry%p_partit%mype==entry%root_rank) then
-            if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2))
-        end if
-        do lev=1, size1
+    ! Serial output implemented so far
+    !___________________________________________________________________________
+    ! write new time index ctime_copy to file --> expand time array in nc file
+    if (entry%p_partit%mype==entry%root_rank) then
+        write(*,*) 'writing mean record for ', trim(entry%name), '; rec. count = ', entry%rec_count
+        call assert_nf( nf_put_vara_double(entry%ncid, entry%Tid, entry%rec_count, 1, entry%ctime_copy, 1), __LINE__)
+    end if
+
+    !_______writing 2D and 3D fields____________________________________________
+    size1=entry%glsize(1)
+    size2=entry%glsize(2)
+    tag = 2 ! we can use a fixed tag here as we have an individual communicator for each output field
+
+    !___________writing 8 byte real_____________________________________________
+    if (entry%accuracy == i_real8) then
+
+        !_______________________________________________________________________
+        ! allocate global 2d array in which local data are gathered
+        if(entry%p_partit%mype==entry%root_rank) then
+            if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2))
+        end if
+
+        !_______________________________________________________________________
+        ! loop over vertical layers --> do gather 3d variables layerwise in 2d
+        ! slices
+        do lev=1, size1
 #ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS
             ! aleph cray-mpich workaround
             call MPI_Barrier(entry%comm, mpierr)
+#elif ENABLE_ALBEDO_INTELMPI_WORKAROUNDS
+            call MPI_Barrier(entry%comm, mpierr)
 #endif
-            if(.not. entry%is_elem_based) then
-                call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit)
-            else
-                call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit)
-            end if
-            if (entry%p_partit%mype==entry%root_rank) then
-                if (entry%ndim==1) then
-                    call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__)
-                elseif (entry%ndim==2) then
-                    call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__)
-                end if
-            end if
-        end do
+            !___________________________________________________________________
+            ! local output variables are gathered in 2d shaped entry%aux_r8
+            ! either for vertices or elements
+            if(.not. entry%is_elem_based) then
+                call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit)
+            else
+                call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit)
+            end if
+
+            !___________________________________________________________________
+            ! use root_rank CPU/Task to write 2d slice into netcdf file for 3d
+            ! variables into specific layer position lev
+            if (entry%p_partit%mype==entry%root_rank) then
+                if (entry%ndim==1) then
+                    call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__)
+                elseif (entry%ndim==2) then
+                    call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__)
+                end if
+            end if
+        end do ! --> do lev=1, size1
-!___________writing 4 byte real _________________________________________
-    else if (entry%accuracy == i_real4) then
-        if(entry%p_partit%mype==entry%root_rank) then
-            if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2))
-        end if
-        do lev=1, size1
+    !___________writing 4 byte real ____________________________________________
+    else if (entry%accuracy == i_real4) then
+
+        !_______________________________________________________________________
+        ! allocate global 2d array in which local data are gathered
+        if(entry%p_partit%mype==entry%root_rank) then
+            if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2))
+        end if
+
+        !_______________________________________________________________________
+        ! loop over vertical layers --> do gather 3d variables layerwise in 2d
+        ! slices
+        do lev=1, size1
+            if (entry%p_partit%mype==entry%root_rank) t0=MPI_Wtime()
#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS
            ! aleph cray-mpich workaround
            call MPI_Barrier(entry%comm, mpierr)
+#elif ENABLE_ALBEDO_INTELMPI_WORKAROUNDS
+            call MPI_Barrier(entry%comm, mpierr)
#endif
-            if(.not. entry%is_elem_based) then
-                call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit)
-            else
-                call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit)
-            end if
-            if (entry%p_partit%mype==entry%root_rank) then
-                if (entry%ndim==1) then
-                    call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__)
-                elseif (entry%ndim==2) then
-                    call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__)
-                end if
-            end if
-        end do
-    end if
-
+            t0=MPI_Wtime()
+            !___________________________________________________________________
+            ! local output variables are gathered in 2d shaped entry%aux_r4
+            ! either for vertices or elements
+            if(.not. entry%is_elem_based) then
+                call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit)
+            else
+                call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit)
+            end if
+
+            !___________________________________________________________________
+            ! use root_rank CPU/Task to write 2d slice into netcdf file for 3d
+            ! variables into specific layer position lev
+            if (entry%p_partit%mype==entry%root_rank) then
+                if (entry%ndim==1) then
+                    call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__)
+                    t1=MPI_Wtime()
+                    !PS if (entry%p_partit%flag_debug) print *, achar(27)//'[31m'//'     -I/O-> after nf_put_vara_real'//achar(27)//'[0m', entry%p_partit%mype, t1-t0
+                elseif (entry%ndim==2) then
+                    call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__)
+                    t1=MPI_Wtime()
+                    !PS if (entry%p_partit%flag_debug) print *, achar(27)//'[31m'//'     -I/O-> after nf_put_vara_real'//achar(27)//'[0m', entry%p_partit%mype, lev, t1-t0
+                end if
+            end if
+        end do ! --> do lev=1, size1
+    end if ! --> if (entry%accuracy == i_real8) then
 end subroutine
-
-
+!
+!
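[Editorial note] The level loop above gathers one horizontal slice per layer and lets the root task write it; the only subtle part is the start/count bookkeeping against the record dimension. A minimal sketch of that single write, assuming the plain five-argument F77 form of nf_put_vara_double (the calls above pass an additional trailing argument, which the sketch omits) and hypothetical names throughout:

    ! Editorial sketch (hypothetical names; not part of the patch).
    subroutine sketch_put_level(ncid, varid, nval, lev, rec, slice)
        implicit none
        include "netcdf.inc"
        integer,      intent(in) :: ncid, varid, nval, lev, rec
        real(kind=8), intent(in) :: slice(nval)
        ! file layout is (horizontal, level, time); the start/count vectors
        ! select one horizontal slice at layer 'lev' and record 'rec'
        if (nf_put_vara_double(ncid, varid, (/1, lev, rec/), (/nval, 1, 1/), slice) /= NF_NOERR) stop 1
    end subroutine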
+!_______________________________________________________________________________
 subroutine update_means
-  implicit none
-  type(Meandata), pointer :: entry
-  integer                 :: n
-  integer                 :: I, J
+    implicit none
+    type(Meandata), pointer :: entry
+    integer                 :: n
+    integer                 :: I, J
-  DO n=1, io_NSTREAMS
-     entry=>io_stream(n)
-!_____________ compute in 8 byte accuracy _________________________
-     IF (entry%accuracy == i_real8) then
-        IF (entry%flip) then
+    DO n=1, io_NSTREAMS
+        entry=>io_stream(n)
+
+        !_____________ compute in 8 byte accuracy ______________________________
+        IF (entry%accuracy == i_real8) then
+            IF (entry%flip) then
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J)
-           DO J=1, size(entry%local_values_r8,dim=2)
-              DO I=1, size(entry%local_values_r8,dim=1)
-                 entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(J,I)
-              END DO
-           END DO
+                DO J=1, size(entry%local_values_r8,dim=2)
+                    DO I=1, size(entry%local_values_r8,dim=1)
+                        entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(J,I)
+                    END DO
+                END DO
!$OMP END PARALLEL DO
-        ELSE
+            ELSE
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J)
-           DO J=1, size(entry%local_values_r8,dim=2)
-              DO I=1, size(entry%local_values_r8,dim=1)
-                 entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(I,J)
-              END DO
-           END DO
+                DO J=1, size(entry%local_values_r8,dim=2)
+                    DO I=1, size(entry%local_values_r8,dim=1)
+                        entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(I,J)
+                    END DO
+                END DO
!$OMP END PARALLEL DO
-        END IF
-!_____________ compute in 4 byte accuracy _________________________
-     ELSE IF (entry%accuracy == i_real4) then
-        IF (entry%flip) then
+            END IF
+
+        !_____________ compute in 4 byte accuracy ______________________________
+        ELSE IF (entry%accuracy == i_real4) then
+            IF (entry%flip) then
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J)
-           DO J=1, size(entry%local_values_r4,dim=2)
-              DO I=1, size(entry%local_values_r4,dim=1)
-                 entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(J,I), real32)
-              END DO
-           END DO
+                DO J=1, size(entry%local_values_r4,dim=2)
+                    DO I=1, size(entry%local_values_r4,dim=1)
+                        entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(J,I), real32)
+                    END DO
+                END DO
!$OMP END PARALLEL DO
-        ELSE
+            ELSE
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J)
-           DO J=1, size(entry%local_values_r4,dim=2)
-              DO I=1, size(entry%local_values_r4,dim=1)
-                 entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(I,J), real32)
-              END DO
-           END DO
+                DO J=1, size(entry%local_values_r4,dim=2)
+                    DO I=1, size(entry%local_values_r4,dim=1)
+                        entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(I,J), real32)
+                    END DO
+                END DO
!$OMP END PARALLEL DO
-        END IF
-     END IF
-     entry%addcounter=entry%addcounter+1
-  END DO
+            END IF
+        END IF ! --> IF (entry%accuracy == i_real8) then
+
+        entry%addcounter=entry%addcounter+1
+    END DO ! --> DO n=1, io_NSTREAMS
 end subroutine
 !
-!-------------------------------------------------------------------------------------------- !
+!_______________________________________________________________________________
+! main output routine called at the end of each time step --> here it is decided
+! whether an output event is triggered
 subroutine output(istep, ice, dynamics, tracers, partit, mesh)
-  use g_clock
-  use mod_mesh
-  USE MOD_PARTIT
-  USE MOD_PARSUP
-  use MOD_DYN
-  use MOD_ICE
-  use mod_tracer
-  use io_gather_module
+    use g_clock
+    use mod_mesh
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    use MOD_DYN
+    use MOD_ICE
+    use mod_tracer
+    use io_gather_module
 #if defined (__icepack)
-  use icedrv_main, only: init_io_icepack
+    use icedrv_main, only: init_io_icepack
 #endif
-  implicit none
-  integer :: istep
-  logical, save :: lfirst=.true.
-  integer :: n, k
-  integer :: i, j !for OMP loops
-  logical :: do_output
-  type(Meandata), pointer :: entry
-  type(t_mesh),   intent(in)   , target :: mesh
-  type(t_partit), intent(inout), target :: partit
-  type(t_tracer), intent(in)   , target :: tracers
-  type(t_dyn)   , intent(in)   , target :: dynamics
-  type(t_ice)   , intent(inout), target :: ice
-
-  character(:), allocatable :: filepath
-  real(real64) :: rtime !timestamp of the record
+    implicit none
+    integer :: istep
+    logical, save :: lfirst=.true.
+    integer :: n, k
+    integer :: i, j !for OMP loops
+    logical :: do_output
+    type(Meandata), pointer :: entry
+    type(t_mesh),   intent(in)   , target :: mesh
+    type(t_partit), intent(inout), target :: partit
+    type(t_tracer), intent(in)   , target :: tracers
+    type(t_dyn)   , intent(in)   , target :: dynamics
+    type(t_ice)   , intent(inout), target :: ice
+
+    character(:), allocatable :: filepath
+    real(real64) :: rtime !timestamp of the record
-  ctime=timeold+(dayold-1.)*86400
-  if (lfirst) then
-     call ini_mean_io(ice, dynamics, tracers, partit, mesh)
-     call init_io_gather(partit)
+    ctime=timeold+(dayold-1.)*86400
+
+    !___________________________________________________________________________
+    if (lfirst) then
+        ! define output streams-->dimension, variable, long_name, units, array, freq, unit, precision
+        !PS if (partit%flag_debug .and. partit%mype==0)  print *, achar(27)//'[32m'//' -I/O-> call ini_mean_io'//achar(27)//'[0m'
+        call ini_mean_io(ice, dynamics, tracers, partit, mesh)
+
+        !PS if (partit%flag_debug .and. partit%mype==0)  print *, achar(27)//'[33m'//' -I/O-> call init_io_gather'//achar(27)//'[0m'
+        call init_io_gather(partit)
 #if defined (__icepack)
-     call init_io_icepack(mesh) !icapack has its copy of p_partit => partit
+        call init_io_icepack(mesh) !icepack has its copy of p_partit => partit
 #endif
-  end if
-
-  call update_means
-
-  do n=1, io_NSTREAMS
-     entry=>io_stream(n)
-     !check whether restart will be written
-     do_output=.false.
-
-     if (entry%freq_unit.eq.'y') then
-        call annual_event(do_output)
-
-     else if (entry%freq_unit == 'm') then
-        call monthly_event(do_output)
-
-     else if (entry%freq_unit == 'd') then
-        call daily_event(do_output, entry%freq)
-
-     else if (entry%freq_unit == 'h') then
-        call hourly_event(do_output, entry%freq)
-
-     else if (entry%freq_unit == 's') then
-        call step_event(do_output, istep, entry%freq)
-
-     else
-        write(*,*) 'You did not specify a supported outputflag.'
-        write(*,*) 'The program will stop to give you opportunity to do it.'
-        call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1)
-        stop
-     endif
-
-     if (do_output) then
-        if (vec_autorotate) call io_r2g(n, partit, mesh) ! automatically detect if a vector field and rotate if makes sense!
-        if(entry%thread_running) call entry%thread%join()
-        entry%thread_running = .false.
+    end if ! --> if (lfirst) then
+
+    !___________________________________________________________________________
+    !PS if (partit%flag_debug .and. partit%mype==0)  print *, achar(27)//'[33m'//' -I/O-> call update_means'//achar(27)//'[0m'
+    call update_means
+
+    !___________________________________________________________________________
+    ! loop over defined streams
+    do n=1, io_NSTREAMS
+        !_______________________________________________________________________
+        ! make pointer for entry onto io_stream object
+        entry=>io_stream(n)
+
+        !_______________________________________________________________________
+        !check whether output will be written based on event frequency
+        do_output=.false.
+        if (entry%freq_unit.eq.'y') then
+            call annual_event(do_output)
+        else if (entry%freq_unit == 'm') then
+            call monthly_event(do_output)
+        else if (entry%freq_unit == 'd') then
+            call daily_event(do_output, entry%freq)
+        else if (entry%freq_unit == 'h') then
+            call hourly_event(do_output, entry%freq)
+        else if (entry%freq_unit == 's') then
+            call step_event(do_output, istep, entry%freq)
+        else
+            write(*,*) 'You did not specify a supported outputflag.'
+            write(*,*) 'The program will stop to give you opportunity to do it.'
+            call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1)
+            stop
+        endif
+
+        !_______________________________________________________________________
+        ! if its time for output --> do_output==.true.
+        if (do_output) then
+            if (vec_autorotate) call io_r2g(n, partit, mesh) ! automatically detect if a vector field and rotate if makes sense!
+            if(entry%thread_running) call entry%thread%join()
+            entry%thread_running = .false.
-
-        filepath = trim(ResultPath)//trim(entry%name)//'.'//trim(runid)//'.'//cyearnew//'.nc'
-        if(partit%mype == entry%root_rank) then
-           if(filepath /= trim(entry%filename)) then
-              if("" /= trim(entry%filename)) call assert_nf(nf_close(entry%ncid), __LINE__)
-              entry%filename = filepath
-              ! use any existing file with this name or create a new one
-              if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then
-                 call create_new_file(entry, ice, dynamics, partit, mesh)
-                 call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__)
-              end if
-              call assoc_ids(entry)
-           end if
-
-           !___if the time rtime at the rec_count is larger than ctime we look for the closest record with the timestamp less than ctime
-           do k=entry%rec_count, 1, -1
-              call assert_nf( nf_get_vara_double(entry%ncid, entry%tID, k, 1, rtime, 1), __LINE__)
-              if (ctime > rtime) then
-                 entry%rec_count=k+1
-                 exit ! a proper rec_count detected, exit the loop
-              end if
-              if (k==1) then
-                 write(*,*) 'I/O '//trim(entry%name)//' WARNING: the existing output file will be overwritten'//'; ', entry%rec_count, ' records in the file;'
-                 entry%rec_count=1
-                 exit ! no appropriate rec_count detected
-              end if
-           end do
-           entry%rec_count=max(entry%rec_count, 1)
-           write(*,*) trim(entry%name)//': current mean I/O counter = ', entry%rec_count
-        end if
-
-        if (entry%accuracy == i_real8) then
+            ! define filepath
+            filepath = trim(ResultPath)//trim(entry%name)//'.'//trim(runid)//'.'//cyearnew//'.nc'
+
+            !___________________________________________________________________
+            ! only root rank task does output
+            if(partit%mype == entry%root_rank) then
+                !_______________________________________________________________
+                ! create new output file ?!
+                if(filepath /= trim(entry%filename)) then
+                    if("" /= trim(entry%filename)) call assert_nf(nf_close(entry%ncid), __LINE__)
+                    entry%filename = filepath
+                    !___________________________________________________________
+                    ! use any existing file with this name or create a new one
+                    if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then
+                        !PS if (partit%flag_debug) print *, achar(27)//'[33m'//' -I/O-> call create_new_file'//achar(27)//'[0m'
+                        call create_new_file(entry, ice, dynamics, partit, mesh)
+
+                        !PS if (partit%flag_debug) print *, achar(27)//'[33m'//' -I/O-> call assert_nf A'//achar(27)//'[0m'//', k=',k, ', rootpart=', entry%root_rank
+                        call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__)
+                    end if
+
+                    !___________________________________________________________
+                    ! setup all dimension definition and attributes of the netcdf
+                    ! file
+                    !PS if (partit%flag_debug) print *, achar(27)//'[33m'//' -I/O-> call assoc_ids'//achar(27)//'[0m'
+                    call assoc_ids(entry)
+
+                end if ! --> if(filepath /= trim(entry%filename)) then
+
+                !_______________________________________________________________
+                ! if the time rtime at the rec_count is larger than ctime we
+                ! look for the closest record with the timestamp less than ctime
+                do k=entry%rec_count, 1, -1
+                    !PS if (partit%flag_debug) print *, achar(27)//'[33m'//' -I/O-> call assert_nf B'//achar(27)//'[0m'//', k=',k, ', rootpart=', entry%root_rank
+                    ! determine rtime from existing file
+                    call assert_nf( nf_get_vara_double(entry%ncid, entry%tID, k, 1, rtime, 1), __LINE__)
+                    if (ctime > rtime) then
+                        entry%rec_count=k+1
+                        exit ! a proper rec_count detected, exit the loop
+                    end if
+                    if (k==1) then
+                        write(*,*) 'I/O '//trim(entry%name)//' WARNING: the existing output file will be overwritten'//'; ', entry%rec_count, ' records in the file;'
+                        entry%rec_count=1
+                        exit ! no appropriate rec_count detected
+                    end if
+                end do
+                entry%rec_count=max(entry%rec_count, 1)
+                write(*,*) trim(entry%name)//': current mean I/O counter = ', entry%rec_count
+            end if ! --> if(partit%mype == entry%root_rank) then
+
+            !___________________________________________________________________
+            ! write double precision output
+            if (entry%accuracy == i_real8) then
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J)
-           DO J=1, size(entry%local_values_r8,dim=2)
-              DO I=1, size(entry%local_values_r8,dim=1)
-                 entry%local_values_r8_copy(I,J) = entry%local_values_r8(I,J) /real(entry%addcounter,real64) ! compute_means
-                 entry%local_values_r8(I,J)      = 0._real64                                                 ! clean_meanarrays
-              END DO
-           END DO
+                DO J=1, size(entry%local_values_r8,dim=2)
+                    DO I=1, size(entry%local_values_r8,dim=1)
+                        entry%local_values_r8_copy(I,J) = entry%local_values_r8(I,J) /real(entry%addcounter,real64) ! compute_means
+                        entry%local_values_r8(I,J)      = 0._real64                                                 ! clean_meanarrays
+                    END DO ! --> DO I=1, size(entry%local_values_r8,dim=1)
+                END DO ! --> DO J=1, size(entry%local_values_r8,dim=2)
!$OMP END PARALLEL DO
-        else if (entry%accuracy == i_real4) then
+
+            !___________________________________________________________________
+            ! write single precision output
+            else if (entry%accuracy == i_real4) then
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J)
-           DO J=1, size(entry%local_values_r4,dim=2)
-              DO I=1, size(entry%local_values_r4,dim=1)
-                 entry%local_values_r4_copy(I,J) = entry%local_values_r4(I,J) /real(entry%addcounter,real32) ! compute_means
-                 entry%local_values_r4(I,J)      = 0._real32                                                 ! clean_meanarrays
-              END DO
-           END DO
+                DO J=1, size(entry%local_values_r4,dim=2)
+                    DO I=1, size(entry%local_values_r4,dim=1)
+                        entry%local_values_r4_copy(I,J) = entry%local_values_r4(I,J) /real(entry%addcounter,real32) ! compute_means
+                        entry%local_values_r4(I,J)      = 0._real32                                                 ! clean_meanarrays
+                    END DO ! --> DO I=1, size(entry%local_values_r4,dim=1)
+                END DO ! --> DO J=1, size(entry%local_values_r4,dim=2)
!$OMP END PARALLEL DO
-        end if
-        entry%addcounter = 0 ! clean_meanarrays
-        entry%ctime_copy = ctime
-        call entry%thread%run()
-        entry%thread_running = .true.
-     endif
-  end do
-  lfirst=.false.
+            end if ! --> if (entry%accuracy == i_real8) then
+
+            !___________________________________________________________________
+            entry%addcounter = 0 ! clean_meanarrays
+            entry%ctime_copy = ctime
+
+            !___________________________________________________________________
+            ! this is where the magic happens --> here do_output_callback is
+            ! triggered as a method of the io_stream object --> call write_mean(...)
+            call entry%thread%run()
+            entry%thread_running = .true.
+
+        endif ! --> if (do_output) then
+    end do ! --> do n=1, io_NSTREAMS
+    lfirst=.false.
 end subroutine
-
-
+!
+!
+!_______________________________________________________________________________
+! this becomes the output callback function that becomes part of the
+! IOstream(idx)%thread object
+! --> this callback function becomes initialised in def_stream_after_dimension_specific(...)
+!     by call IOstream(idx)%thread%initialize(do_output_callback, entry_index)
+! --> the execution of this function can be triggered by IOstream(idx)%thread%run()
+! --> do_output_callback becomes an executable method of the object
+!     IOstream(idx)%thread
 subroutine do_output_callback(entry_index)
-use mod_mesh
-USE MOD_PARTIT
-USE MOD_PARSUP
-  integer, intent(in) :: entry_index
-  ! EO args
-  type(Meandata), pointer :: entry
-
-
-  entry=>io_stream(entry_index)
-  entry%p_partit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3)
-
-  call write_mean(entry, entry_index)
-  if(entry%p_partit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write
+    use mod_mesh
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    integer, intent(in) :: entry_index ! index of variable in def_stream array
+    ! EO args
+    type(Meandata), pointer :: entry
+
+    entry=>io_stream(entry_index)
+    entry%p_partit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray environment (at least with ftn 2.5.9 and cray-mpich 7.5.3)
+
+    !___________________________________________________________________________
+    ! collect local mean output data (entry%local_values_r8_copy) into global 2d
+    ! array (entry%aux_r8) and use the root_rank CPU/Task to write them into netcdf
+    ! file. In case of 3d data, write horizontal slices level-wise.
+    call write_mean(entry, entry_index)
+
+    !___________________________________________________________________________
+    ! The function NF_SYNC offers a way to synchronize the disk copy of a netCDF
+    ! dataset with in-memory buffers. There are two reasons you might want to
+    ! synchronize after writes: to minimize data loss in case of abnormal
+    ! termination, or to make data available to other processes for reading
+    ! immediately after it is written.
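[Editorial note] For orientation, the asynchronous output lifecycle used by this module, summarized from the calls that appear in this patch (usage summary only; the thread type itself is defined elsewhere in FESOM):

    ! bind the callback once per stream, at stream definition time:
    !     call entry%thread%initialize(do_output_callback, entry_index)
    ! trigger one asynchronous write when an output event fires:
    !     call entry%thread%run()
    ! block until the previous write finished before reusing the copy buffers:
    !     call entry%thread%join()
    ! the nf_sync after each write trades some I/O speed for crash robustness.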
+    if(entry%p_partit%mype == entry%root_rank) then
+        !PS if (entry%p_partit%flag_debug) print *, achar(27)//'[31m'//'     -I/O-> call nf_sync'//achar(27)//'[0m', entry%p_partit%mype
+        call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write
+    end if
+
 end subroutine
-
-
+!
+!
+!_______________________________________________________________________________
+! Executed in fesom_main.F90 via call fesom_finalize() after the run loop has
+! finished --> stop and clean up the per-stream output threads
 subroutine finalize_output()
-  integer i
-  type(Meandata), pointer :: entry
-
-  do i=1, io_NSTREAMS
-     entry=>io_stream(i)
-     if(entry%thread_running) call entry%thread%join()
-     entry%thread_running = .false.
-  end do
+    integer i
+    type(Meandata), pointer :: entry
+    do i=1, io_NSTREAMS
+        entry=>io_stream(i)
+        if(entry%thread_running) call entry%thread%join()
+        entry%thread_running = .false.
+    end do
 end subroutine
 !
-!-------------------------------------------------------------------------------------------- !
+!_______________________________________________________________________________
+! build 3d meandata streaming object
 subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh, flip_array)
-  use mod_mesh
-  USE MOD_PARTIT
-  USE MOD_PARSUP
-  implicit none
-  type(t_partit), intent(inout), target :: partit
-  integer,          intent(in) :: glsize(2), lcsize(2)
-  character(len=*), intent(in) :: name, description, units
-  real(kind=WP), target, intent(in) :: data(:,:)
-  integer,          intent(in) :: freq
-  character,        intent(in) :: freq_unit
-  integer,          intent(in) :: accuracy
-  type(Meandata), allocatable  :: tmparr(:)
-  type(Meandata), pointer      :: entry
-  type(t_mesh), intent(in), target :: mesh
-  logical, optional, intent(in) :: flip_array
-  integer i
+    use mod_mesh
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    implicit none
+    type(t_partit), intent(inout), target :: partit
+    integer,          intent(in) :: glsize(2), lcsize(2)
+    character(len=*), intent(in) :: name, description, units
+    real(kind=WP), target, intent(in) :: data(:,:)
+    integer,          intent(in) :: freq
+    character,        intent(in) :: freq_unit
+    integer,          intent(in) :: accuracy
+    type(Meandata), allocatable  :: tmparr(:)
+    type(Meandata), pointer      :: entry
+    type(t_mesh), intent(in), target :: mesh
+    logical, optional, intent(in) :: flip_array
+    integer i
+
+    !___________________________________________________________________________
#if !defined(__PGI)
-  do i = 1, rank(data)
-     if ((ubound(data, dim = i)<=0)) then
-        if (partit%mype==0) then
-           write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)'
-           write(*,*) 'upper bound is: ', ubound(data, dim = i)
-        end if
-        return
-     end if
-  end do
+    do i = 1, rank(data)
+        if ((ubound(data, dim = i)<=0)) then
+            if (partit%mype==0) then
+                write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)'
+                write(*,*) 'upper bound is: ', ubound(data, dim = i)
+            end if
+            return
+        end if
+    end do
#endif
-  if (partit%mype==0) then
-     write(*,*) 'adding I/O stream 3D for ', trim(name)
-  end if
-
-  call associate_new_stream(name, entry)
-
-  ! 3d specific
-  entry%ptr3 => data !2D! entry%ptr3(1:1,1:size(data)) => data
+    !___________________________________________________________________________
+    if (partit%mype==0) then
+        write(*,*) 'adding I/O stream 3D for ', trim(name)
+    end if
+
+    !___________________________________________________________________________
+    ! initialise meandata streaming object
+    call associate_new_stream(name, entry)
+
+    !___________________________________________________________________________
+    ! fill up 3d meandata streaming object
+    ! 3d specific
+    entry%ptr3 => data !2D! entry%ptr3(1:1,1:size(data)) => data
-  if (present(flip_array)) then
-     if (flip_array) then
-        entry%flip = .true.
-     else
-        entry%flip = .false.
-     end if
-  else
-     entry%flip = .false.
-  end if
+    if (present(flip_array)) then
+        if (flip_array) then
+            entry%flip = .true.
+        else
+            entry%flip = .false.
+        end if
+    else
+        entry%flip = .false.
+    end if
-  entry%ndim=2
-  entry%glsize=glsize !2D! entry%glsize=(/1, glsize/)
+    entry%ndim=2
+    entry%glsize=glsize !2D! entry%glsize=(/1, glsize/)
-  if (accuracy == i_real8) then
-     allocate(entry%local_values_r8(lcsize(1), lcsize(2)))
-     entry%local_values_r8 = 0._real64
-  elseif (accuracy == i_real4) then
-     allocate(entry%local_values_r4(lcsize(1), lcsize(2)))
-     entry%local_values_r4 = 0._real32
-  end if
+    if (accuracy == i_real8) then
+        allocate(entry%local_values_r8(lcsize(1), lcsize(2)))
+        entry%local_values_r8 = 0._real64
+    elseif (accuracy == i_real4) then
+        allocate(entry%local_values_r4(lcsize(1), lcsize(2)))
+        entry%local_values_r4 = 0._real32
+    end if
-  entry%dimname(1)=mesh_dimname_from_dimsize(glsize(1), partit, mesh) !2D! mesh_dimname_from_dimsize(glsize, mesh)
-  entry%dimname(2)=mesh_dimname_from_dimsize(glsize(2), partit, mesh) !2D! entry%dimname(2)='unknown'
-  ! non dimension specific
-  call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh)
+    entry%dimname(1)=mesh_dimname_from_dimsize(glsize(1), partit, mesh) !2D! mesh_dimname_from_dimsize(glsize, mesh)
+    entry%dimname(2)=mesh_dimname_from_dimsize(glsize(2), partit, mesh) !2D! entry%dimname(2)='unknown'
+
+    ! non dimension specific
+    call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh)
 end subroutine
 !
-!-------------------------------------------------------------------------------------------- !
+!_______________________________________________________________________________
+! build 2d meandata streaming object
 subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh)
-  use mod_mesh
-  USE MOD_PARTIT
-  USE MOD_PARSUP
-  implicit none
-  integer,          intent(in) :: glsize, lcsize
-  character(len=*), intent(in) :: name, description, units
-  real(kind=WP), target, intent(in) :: data(:)
-  integer,          intent(in) :: freq
-  character,        intent(in) :: freq_unit
-  integer,          intent(in) :: accuracy
-  type(Meandata), allocatable  :: tmparr(:)
-  type(Meandata), pointer      :: entry
-  type(t_mesh),   intent(in)    :: mesh
-  type(t_partit), intent(inout) :: partit
-  integer i
+    use mod_mesh
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    implicit none
+    integer,          intent(in) :: glsize, lcsize
+    character(len=*), intent(in) :: name, description, units
+    real(kind=WP), target, intent(in) :: data(:)
+    integer,          intent(in) :: freq
+    character,        intent(in) :: freq_unit
+    integer,          intent(in) :: accuracy
+    type(Meandata), allocatable  :: tmparr(:)
+    type(Meandata), pointer      :: entry
+    type(t_mesh),   intent(in)    :: mesh
+    type(t_partit), intent(inout) :: partit
+    integer i
+
+    !___________________________________________________________________________
#if !defined(__PGI)
-  do i = 1, rank(data)
-     if ((ubound(data, dim = i)<=0)) then
-        if (partit%mype==0) then
-           write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)'
-           write(*,*) 'upper bound is: ', ubound(data, dim = i)
-        end if
-        return
-     end if
-  end do
+    do i = 1, rank(data)
+        if ((ubound(data, dim = i)<=0)) then
+            if (partit%mype==0) then
+                write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)'
+                write(*,*) 'upper bound is: ', ubound(data, dim = i)
+            end if
+            return
+        end if
+    end do
#endif
-  if (partit%mype==0) then
-     write(*,*) 'adding I/O stream 2D for ', trim(name)
-  end if
+    !___________________________________________________________________________
+    if (partit%mype==0) then
+        write(*,*) 'adding I/O stream 2D for ', trim(name)
+    end if
-  call associate_new_stream(name, entry)
-
-  ! 2d specific
-  entry%ptr3(1:1,1:size(data)) => data(:)
+    !___________________________________________________________________________
+    ! initialise meandata streaming object
+    call associate_new_stream(name, entry)
+
+    !___________________________________________________________________________
+    ! fill up 2d meandata streaming object
+    ! 2d specific
+    entry%ptr3(1:1,1:size(data)) => data(:)
-  if (accuracy == i_real8) then
-     allocate(entry%local_values_r8(1, lcsize))
-     entry%local_values_r8 = 0._real64
-  elseif (accuracy == i_real4) then
-     allocate(entry%local_values_r4(1, lcsize))
-     entry%local_values_r4 = 0._real32
-  end if
+    if (accuracy == i_real8) then
+        allocate(entry%local_values_r8(1, lcsize))
+        entry%local_values_r8 = 0._real64
+    elseif (accuracy == i_real4) then
+        allocate(entry%local_values_r4(1, lcsize))
+        entry%local_values_r4 = 0._real32
+    end if
-  entry%ndim=1
-  entry%glsize=(/1, glsize/)
+    entry%ndim=1
+    entry%glsize=(/1, glsize/)
-  entry%dimname(1)=mesh_dimname_from_dimsize(glsize, partit, mesh)
-  entry%dimname(2)='unknown'
+    entry%dimname(1)=mesh_dimname_from_dimsize(glsize, partit, mesh)
+    entry%dimname(2)='unknown'
-  ! non dimension specific
-  call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh)
+    ! non dimension specific
+    call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh)
 end subroutine
-
-
- subroutine associate_new_stream(name, entry)
-    type(Meandata), pointer :: entry
+!
+!
+!_______________________________________________________________________________
+! initialise new meandata streaming object
+subroutine associate_new_stream(name, entry)
+    type(Meandata), pointer :: entry
    character(len=*), intent(in) :: name
    integer i

    entry => null()
+    !___________________________________________________________________________
    ! check if we already have this variable
    do i=1, io_NSTREAMS
-      if(trim(io_stream(i)%name) .eq. name) then
-        print *,"variable '"//name//"' already exists, &
-            &check if you define it multiple times, for example in namelist.io, &
-            &namelist.icepack, io_meandata.F90 or other place that add I/O stream."
-        call assert(.false., __LINE__)
-      end if
+        if(trim(io_stream(i)%name) .eq. name) then
+            print *,"variable '"//name//"' already exists, &
+                &check if you define it multiple times, for example in namelist.io, &
+                &namelist.icepack, io_meandata.F90 or other place that add I/O stream."
+            call assert(.false., __LINE__)
+        end if
    end do
-
+
+    !___________________________________________________________________________
    ! add this instance to io_stream array
    io_NSTREAMS = io_NSTREAMS +1
    call assert(size(io_stream) >= io_NSTREAMS, __LINE__)
    entry=>io_stream(io_NSTREAMS)
-  end subroutine
+end subroutine
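[Editorial note] As a usage illustration of the registration machinery above: a caller registers a field once, and the stream then accumulates, averages, and writes it without further involvement. A hypothetical 2D example in the same call style as the momix_length stream defined earlier (field name and array are placeholders, not FESOM variables):

    ! Editorial sketch (hypothetical field; not part of the patch).
    ! call def_stream(nod2D, myDim_nod2D, 'my_field', 'example nodal field', 'm', my_field(:), 1, 'm', i_real4, partit, mesh)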
+!
+!
+!_______________________________________________________________________________
+subroutine assert_nf(status, line)
   integer, intent(in) :: status
   integer, intent(in) :: line
   ! EO args
   include "netcdf.inc" ! old netcdf fortran interface required?
   if(status /= NF_NOERR) then
-    print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status))
-    stop 1
+    print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status))
+    stop 1
   end if
-  end subroutine
-
-
-  subroutine assert(val, line)
+end subroutine
+!
+!
+!_______________________________________________________________________________
+subroutine assert(val, line)
   logical, intent(in) :: val
   integer, intent(in) :: line
   ! EO args
   if(.NOT. val) then
-    print *, "error in line ",line, __FILE__
-    stop 1
+    print *, "error in line ",line, __FILE__
+    stop 1
   end if
-  end subroutine
-
-
-  subroutine io_r2g(n, partit, mesh)
+end subroutine
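assert_nf checks a status code from the old netCDF Fortran 77 interface and stops with the decoded error string; a usage sketch (the file name is hypothetical, nf_open and NF_NOWRITE are the standard netcdf.inc API):

    integer :: status, ncid
    status = nf_open('fesom_output.nc', NF_NOWRITE, ncid)
    call assert_nf(status, __LINE__)   ! prints nf_strerror(status) and stops on failure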
+!
+!
+!_______________________________________________________________________________
+! do vector rotation on the fly
+subroutine io_r2g(n, partit, mesh)
   USE MOD_MESH
   USE MOD_PARTIT
   USE g_rotate_grid
@@ -1330,42 +1488,44 @@ subroutine io_r2g(n, partit, mesh)
     write(*,*) trim(entry_x%name)//' and '//trim(entry_y%name)//' will be rotated before output!'
   END IF
+  !___________________________________________________________________________
   IF ((entry_x%accuracy == i_real8) .AND. (entry_y%accuracy == i_real8)) THEN
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, xmean, ymean)
-    DO J=1, size(entry_x%local_values_r8,dim=2)
-      if (entry_x%is_elem_based) then
-        xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP
-        ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP
-      else
-        xmean=mesh%coord_nod2D(1, J)
-        ymean=mesh%coord_nod2D(2, J)
-      end if
-      DO I=1, size(entry_x%local_values_r8,dim=1)
-        call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), xmean, ymean, 0)
-      END DO
-    END DO
+    DO J=1, size(entry_x%local_values_r8,dim=2)
+      if (entry_x%is_elem_based) then
+        xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP
+        ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP
+      else
+        xmean=mesh%coord_nod2D(1, J)
+        ymean=mesh%coord_nod2D(2, J)
+      end if
+      DO I=1, size(entry_x%local_values_r8,dim=1)
+        call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), xmean, ymean, 0)
+      END DO
+    END DO
!$OMP END PARALLEL DO
   END IF
+  !___________________________________________________________________________
   IF ((entry_x%accuracy == i_real4) .AND. (entry_y%accuracy == i_real4)) THEN
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, temp_x, temp_y, xmean, ymean)
-    DO J=1, size(entry_x%local_values_r4,dim=2)
-      if (entry_x%is_elem_based) then
-        xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP
-        ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP
-      else
-        xmean=mesh%coord_nod2D(1, J)
-        ymean=mesh%coord_nod2D(2, J)
-      end if
-      DO I=1, size(entry_x%local_values_r4,dim=1)
-        temp_x=real(entry_x%local_values_r4(I,J), real64)
-        temp_y=real(entry_y%local_values_r4(I,J), real64)
-        call vector_r2g(temp_x, temp_y, xmean, ymean, 0)
-        entry_x%local_values_r4(I,J)=real(temp_x, real32)
-        entry_y%local_values_r4(I,J)=real(temp_y, real32)
-      END DO
-    END DO
+    DO J=1, size(entry_x%local_values_r4,dim=2)
+      if (entry_x%is_elem_based) then
+        xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP
+        ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP
+      else
+        xmean=mesh%coord_nod2D(1, J)
+        ymean=mesh%coord_nod2D(2, J)
+      end if
+      DO I=1, size(entry_x%local_values_r4,dim=1)
+        temp_x=real(entry_x%local_values_r4(I,J), real64)
+        temp_y=real(entry_y%local_values_r4(I,J), real64)
+        call vector_r2g(temp_x, temp_y, xmean, ymean, 0)
+        entry_x%local_values_r4(I,J)=real(temp_x, real32)
+        entry_y%local_values_r4(I,J)=real(temp_y, real32)
+      END DO
+    END DO
!$OMP END PARALLEL DO
   END IF
-  end subroutine
+end subroutine
 end module
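io_r2g rotates each (x,y) pair of a registered vector field from the rotated model grid to geographical coordinates before output, evaluating the rotation at the node coordinate for node-based fields and at the mean of the three vertex coordinates for element-based fields. A single-pair sketch of the element-based case (u, v and elem are illustrative names; the final argument 0 is passed exactly as in the loops above):

    xmean = sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, elem)))/3._WP
    ymean = sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, elem)))/3._WP
    call vector_r2g(u, v, xmean, ymean, 0)   ! rotate (u,v) in place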
From bbaf5a2a1332c1f372091e43688e3556ec545709 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Wed, 21 Dec 2022 16:43:06 +0100
Subject: [PATCH 22/25] add debug_flag to the derived type partit, but keep it commented out for the moment so as not to alter existing partit derived types

---
 src/MOD_PARTIT.F90      | 3 +++
 src/gen_model_setup.F90 | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90
index c51ae2221..4326230f0 100644
--- a/src/MOD_PARTIT.F90
+++ b/src/MOD_PARTIT.F90
@@ -64,6 +64,7 @@ module MOD_PARTIT
     integer :: npes
     integer :: mype
     integer :: maxPEnum=100
+!PS logical :: flag_debug=.false.
     integer, allocatable, dimension(:) :: part ! Mesh partition
@@ -151,6 +152,7 @@ subroutine WRITE_T_PARTIT(partit, unit, iostat, iomsg)
     write(unit, iostat=iostat, iomsg=iomsg) partit%npes
     write(unit, iostat=iostat, iomsg=iomsg) partit%mype
     write(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum
+!PS write(unit, iostat=iostat, iomsg=iomsg) partit%flag_debug
     call write_bin_array(partit%part, unit, iostat, iomsg)
     write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D
@@ -182,6 +184,7 @@ subroutine READ_T_PARTIT(partit, unit, iostat, iomsg)
     read(unit, iostat=iostat, iomsg=iomsg) partit%npes
     read(unit, iostat=iostat, iomsg=iomsg) partit%mype
     read(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum
+!PS read(unit, iostat=iostat, iomsg=iomsg) partit%flag_debug
     call read_bin_array(partit%part, unit, iostat, iomsg)
     read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D
diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90
index 4f78d5610..d4a183be5 100755
--- a/src/gen_model_setup.F90
+++ b/src/gen_model_setup.F90
@@ -30,6 +30,9 @@ subroutine setup_model(partit)
   read (fileunit, NML=run_config)
!!$ read (fileunit, NML=machine)
   close (fileunit)
+
+!PS partit%flag_debug=flag_debug
+
   ! ==========
   ! compute dt
   ! ==========
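Once the !PS lines above are uncommented, the flag would travel from the run_config namelist through setup_model into the partit derived type and could gate diagnostic prints wherever partit is available. A sketch of the intended use (assuming flag_debug has been activated):

    if (partit%flag_debug .and. partit%mype==0) then
       print *, 'dbg: setup_model has read run_config'
    end if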
From 7e1411f85a92ebcd6a172c8cd95dbd9b38ed5698 Mon Sep 17 00:00:00 2001
From: dsidoren
Date: Thu, 22 Dec 2022 12:38:26 +0100
Subject: [PATCH 23/25] Update gen_modules_diag.F90

zisotherm computation improved
---
 src/gen_modules_diag.F90 | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90
index 18ae3d9cb..49e53de3b 100755
--- a/src/gen_modules_diag.F90
+++ b/src/gen_modules_diag.F90
@@ -806,6 +806,9 @@ subroutine compute_extflds(mode, dynamics, tracers, partit, mesh)
   if (firstcall) then !allocate the stuff at the first call
      allocate(zisotherm(myDim_nod2D+eDim_nod2D))
      allocate(tempzavg(myDim_nod2D+eDim_nod2D), saltzavg(myDim_nod2D+eDim_nod2D))
+     zisotherm=0.0_WP
+     tempzavg =0.0_WP
+     saltzavg =0.0_WP
      firstcall=.false.
      if (mode==0) return
   end if
@@ -820,16 +823,15 @@ subroutine compute_extflds(mode, dynamics, tracers, partit, mesh)
      zn =0.0_WP
      do nz=nzmin+1, nzmax-1
         tup=temp(nz-1, n)
-        tlo=temp(nz, n)
-        if (tup==tlo) cycle
-        if ((tup-whichtemp)*(tlo-whichtemp)<=0) then
+        if (tup < whichtemp) exit
+        tlo=temp(nz, n)
+        if ((tup-whichtemp)*(tlo-whichtemp)<0) then
            zn=zn+0.5_WP*(hnode(nz-1, n)+(whichtemp-tup)*sum(hnode(nz-1:nz, n))/(tlo-tup))
-           zisotherm(n)=zn
            exit
         end if
         zn=zn+hnode(nz-1, n)
      end do
-!    if (tlo > whichtemp .AND. depth<=1.e-12) zisotherm=depth+hnode(nz, node) set the depth to the total depth if the isotherm is not found
+     zisotherm(n)=zn
   END DO
!$OMP END PARALLEL DO
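A note on the reworked loop, derived from the diff above: zn accumulates layer thicknesses while the water is still warmer than whichtemp, and the loop exits immediately when even the upper layer is colder, so zisotherm(n) then keeps the newly initialised value 0. When the two layers first bracket whichtemp, the crossing depth is a linear interpolation between their mid-depths: with frac = (whichtemp-tup)/(tlo-tup), the final increment

    zn = zn + 0.5_WP*( hnode(nz-1,n) + frac*sum(hnode(nz-1:nz,n)) )

adds half of hnode(nz-1,n) at frac=0 (the upper mid-depth) and hnode(nz-1,n) plus half of hnode(nz,n) at frac=1 (the lower mid-depth). If no crossing is found, the accumulated column depth is kept.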
From 9c6c24ed445db9648c081a0ae5bbbf272bdbbbea Mon Sep 17 00:00:00 2001
From: Patrick
Date: Thu, 22 Dec 2022 14:48:47 +0100
Subject: [PATCH 24/25] kick out additional debugging flags

---
 src/CMakeLists.txt | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 14e030899..707e09beb 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -138,8 +138,6 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel )
    else()
       target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -ip -init=zero -no-wrap-margin)
    endif()
-#   target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero)
-#   target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero)
    if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de )
       target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -mtune=core-avx2)
    elseif(${FESOM_PLATFORM_STRATEGY} STREQUAL albedo)
@@ -147,7 +145,10 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel )
    else()
       target_compile_options(${PROJECT_NAME} PRIVATE -xHost)
    endif()
-   target_compile_options(${PROJECT_NAME} PRIVATE -g -traceback ) #-check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero)
+#   target_compile_options(${PROJECT_NAME} PRIVATE -g -traceback ) #-check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero)
+#   target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero)
+#   target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero)
+
 elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU )
#   target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none)
    target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none)

From 54ea3d082bef5317022cf724bd5d13b48630483d Mon Sep 17 00:00:00 2001
From: Patrick
Date: Thu, 22 Dec 2022 14:49:41 +0100
Subject: [PATCH 25/25] comment out output time measurement

---
 src/io_meandata.F90 | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/io_meandata.F90 b/src/io_meandata.F90
index 924b4328d..de3014db0 100644
--- a/src/io_meandata.F90
+++ b/src/io_meandata.F90
@@ -852,14 +852,13 @@ subroutine write_mean(entry, entry_index)
   ! loop over vertical layers --> do gather 3d variables layerwise in 2d
   ! slices
   do lev=1, size1
-     if (entry%p_partit%mype==entry%root_rank) t0=MPI_Wtime()
+     !PS if (entry%p_partit%mype==entry%root_rank) t0=MPI_Wtime()
#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS
     ! aleph cray-mpich workaround
     call MPI_Barrier(entry%comm, mpierr)
#elif ENABLE_ALBEDO_INTELMPI_WORKAROUNDS
     call MPI_Barrier(entry%comm, mpierr)
#endif
-    t0=MPI_Wtime()
     !___________________________________________________________________
     ! local output variables are gahtered in 2d shaped entry%aux_r8
     ! either for vertices or elements
     if (entry%p_partit%mype==entry%root_rank) then
        if (entry%ndim==1) then
           call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__)
-          t1=MPI_Wtime()
+          !PS t1=MPI_Wtime()
           !PS if (entry%p_partit%flag_debug) print *, achar(27)//'[31m'//' -I/O-> after nf_put_vara_real'//achar(27)//'[0m', entry%p_partit%mype, t1-t0
        elseif (entry%ndim==2) then
           call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__)
-          t1=MPI_Wtime()
+          !PS t1=MPI_Wtime()
           !PS if (entry%p_partit%flag_debug) print *, achar(27)//'[31m'//' -I/O-> after nf_put_vara_real'//achar(27)//'[0m', entry%p_partit%mype, lev, t1-t0
        end if
     end if
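For reference, the timing pattern these !PS lines would re-enable, combined with the flag_debug of patch 22 (both inactive in the committed state; t0 and t1 as declared in write_mean):

    !PS t0=MPI_Wtime()
    call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__)
    !PS t1=MPI_Wtime()
    !PS if (entry%p_partit%flag_debug) print *, ' -I/O-> after nf_put_vara_real', entry%p_partit%mype, t1-t0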