diff --git a/.github/workflows/lumi-sync.yaml b/.github/workflows/lumi-sync.yaml new file mode 100644 index 000000000..11ca2531c --- /dev/null +++ b/.github/workflows/lumi-sync.yaml @@ -0,0 +1,28 @@ +name: Run CICD on Lumi (using .gitlab-ci.yml) + +on: + push: + branches: + - refactoring + - lumi_gpu_evatali + +jobs: + gitlabsync: + runs-on: ubuntu-latest + steps: + - name: "Check out code" + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: "Gitlab mirror and CI" + uses: "tiggi/gitlab-mirror-and-ci-action@tiggi/fixes" + with: + args: "https://gitlab.com/lumi-cicd/fesom2.git" + env: + FOLLOW_TAGS: "true" + FORCE_PUSH: "true" + GITLAB_HOSTNAME: "gitlab.com" + GITLAB_USERNAME: "tiggi" + GITLAB_PASSWORD: ${{ secrets.GITLAB_PASSWORD }} + GITLAB_PROJECT_ID: "51374059" + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000..9822c32bf --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,12 @@ +fesom-job: + stage: build + variables: + SCHEDULER_PARAMETERS: "-N 1 -n 56 --mem=32G -p dev-g -t 00:30:00 -A project_462000376" + tags: + - lumi +# artifacts: +# paths: +# - fesom_build.log + script: + - echo "building fesom branch" + - bash -l configure.sh lumi diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..54d438de3 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,35 @@ +# Read the Docs configuration file for Sphinx projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.9" + # You can also specify other tool versions: + # nodejs: "20" + # rust: "1.70" + # golang: "1.20" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/conf.py + # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs + # builder: "dirhtml" + # Fail on all warnings to avoid broken references + # fail_on_warning: true + +# Optionally build your docs in additional formats such as PDF and ePub +# formats: +# - pdf +# - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 60a65d66f..3aab2f2c1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,6 +17,8 @@ set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs F set(CRAY OFF CACHE BOOL "compile with cray ftn") set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") set(OPENMP_REPRODUCIBLE OFF CACHE BOOL "serialize OpenMP loops that are critical for reproducible results") +set(USE_MULTIO OFF CACHE BOOL "Use MULTIO for IO, either grib or binary for now. This also means path to MULTIO installation has to provided using env MULTIO_INSTALL_PATH='..' 
and multio configuration yamls must be present to run the model with MULTIO")
+set(OASIS_WITH_YAC OFF CACHE BOOL "Use a version of OASIS compiled with YAC instead of SCRIP for interpolation")
 #set(VERBOSE OFF CACHE BOOL "toggle debug output")
 #add_subdirectory(oasis3-mct/lib/psmile)
diff --git a/cmake/FindMULTIO.cmake b/cmake/FindMULTIO.cmake
new file mode 100644
index 000000000..5c86c30b8
--- /dev/null
+++ b/cmake/FindMULTIO.cmake
@@ -0,0 +1,50 @@
+# FindMULTIO.cmake
+
+include(FindPackageHandleStandardArgs)
+
+# Use the environment variable as a hint
+set(MULTIO_HINT_PATH $ENV{MULTIO_INSTALL_PATH})
+
+# Try to find the Fortran API library
+find_library(MULTIO_FAPI_LIBRARY
+  NAMES multio-fapi # Adjust this if the library name is different
+  HINTS ${MULTIO_HINT_PATH}/lib
+)
+
+# Try to find the dependency library
+find_library(MULTIO_API_LIBRARY
+  NAMES multio-api
+  HINTS ${MULTIO_HINT_PATH}/lib
+)
+
+# Try to find the dependency library
+find_library(MULTIO_LIBRARY
+  NAMES multio
+  HINTS ${MULTIO_HINT_PATH}/lib
+)
+
+# Try to find the Fortran module path
+find_path(MULTIO_MODULE_PATH
+  NAMES multio_api.mod # Replace with an actual module name you expect to find
+  HINTS ${MULTIO_HINT_PATH}/module ${MULTIO_HINT_PATH}/multio/module
+)
+
+# Aggregate the libraries for easier linking
+set(MULTIO_LIBRARIES ${MULTIO_FAPI_LIBRARY} ${MULTIO_API_LIBRARY} ${MULTIO_LIBRARY})
+
+# Handle the results
+find_package_handle_standard_args(MULTIO
+  REQUIRED_VARS MULTIO_LIBRARIES MULTIO_MODULE_PATH
+  FOUND_VAR MULTIO_FOUND
+)
+
+# If found, set the MULTIO_INCLUDE_DIRS variable for easy use
+if(MULTIO_FOUND)
+  set(MULTIO_INCLUDE_DIRS ${MULTIO_MODULE_PATH})
+endif()
+
+# Mark variables as advanced
+mark_as_advanced(MULTIO_LIBRARY MULTIO_MODULE_PATH)
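+
+# A sketch of typical consumer usage, assuming this directory is on
+# CMAKE_MODULE_PATH (src/CMakeLists.txt in this PR sets that up before the
+# find_package call); the target name "fesom" is illustrative:
+#
+#   find_package(MULTIO REQUIRED)
+#   target_include_directories(fesom PRIVATE ${MULTIO_INCLUDE_DIRS})
+#   target_link_libraries(fesom ${MULTIO_LIBRARIES})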
diff --git a/cmake/FindNETCDF.cmake b/cmake/FindNETCDF.cmake
index 03b985d67..cbd823e8e
--- a/cmake/FindNETCDF.cmake
+++ b/cmake/FindNETCDF.cmake
@@ -13,9 +13,13 @@ if(CMAKE_Fortran_COMPILER_LOADED)
    if(HAVE_Fortran_NETCDF)
       set(NETCDF_Fortran_INCLUDE_DIRECTORIES "")
       set(NETCDF_Fortran_LIBRARIES "")
+      set(NETCDF_Fortran_FOUND 1)
    else()
-      find_path(NETCDF_Fortran_INCLUDE_DIRECTORIES netcdf.inc HINTS $ENV{NETCDF_DIR}/include ENV NETCDF_Fortran_INCLUDE_DIRECTORIES)
+      find_path(NETCDF_Fortran_INCLUDE_DIRECTORIES netcdf.inc HINTS $ENV{NETCDF_ROOT}/include $ENV{NETCDF_DIR}/include $ENV{NETCDF4_DIR}/include ENV NETCDF_Fortran_INCLUDE_DIRECTORIES)
       find_library(NETCDF_Fortran_LIBRARIES netcdff HINTS ${NETCDF_Fortran_INCLUDE_DIRECTORIES}/../lib)
+      if( NETCDF_Fortran_INCLUDE_DIRECTORIES AND NETCDF_Fortran_LIBRARIES )
+         set(NETCDF_Fortran_FOUND 1)
+      endif()
    endif()
 endif()
@@ -27,14 +31,18 @@ if(CMAKE_C_COMPILER_LOADED OR CMAKE_CXX_COMPILER_LOADED)
    if(HAVE_C_NETCDF)
      set(NETCDF_C_INCLUDE_DIRECTORIES "")
      set(NETCDF_C_LIBRARIES "")
+     set(NETCDF_C_FOUND 1)
    else()
-     find_path(NETCDF_C_INCLUDE_DIRECTORIES netcdf.h HINTS $ENV{NETCDF_DIR}/include ENV NETCDF_C_INCLUDE_DIRECTORIES)
+     find_path(NETCDF_C_INCLUDE_DIRECTORIES netcdf.h HINTS $ENV{NETCDF_ROOT}/include $ENV{NETCDF_DIR}/include $ENV{NETCDF4_DIR}/include ENV NETCDF_C_INCLUDE_DIRECTORIES)
      find_library(NETCDF_C_LIBRARIES netcdf HINTS ${NETCDF_C_INCLUDE_DIRECTORIES}/../lib)
+     if( NETCDF_C_INCLUDE_DIRECTORIES AND NETCDF_C_LIBRARIES )
+        set(NETCDF_C_FOUND 1)
+     endif()
    endif()
 endif()
 
 if(CMAKE_CXX_COMPILER_LOADED)
-   find_path(NETCDF_CXX_INCLUDE_DIRECTORIES netcdf HINTS $ENV{NETCDF_DIR}/include ENV NETCDF_CXX_INCLUDE_DIRECTORIES)
+   find_path(NETCDF_CXX_INCLUDE_DIRECTORIES netcdf HINTS $ENV{NETCDF_ROOT}/include $ENV{NETCDF_DIR}/include $ENV{NETCDF4_DIR}/include ENV NETCDF_CXX_INCLUDE_DIRECTORIES)
    # the cray toolchain (e.g. hlrn) disables dynamic linking by default. to enable it at build time do e.g. "CRAYPE_LINK_TYPE=dynamic make".
    find_library(NETCDF_CXX_LIBRARIES NAMES netcdf_c++4 netcdf-cxx4 HINTS ${NETCDF_CXX_INCLUDE_DIRECTORIES}/../lib)
    if(NETCDF_CXX_INCLUDE_DIRECTORIES AND NETCDF_C_INCLUDE_DIRECTORIES)
@@ -43,4 +51,21 @@ if(CMAKE_CXX_COMPILER_LOADED)
    if(NETCDF_CXX_LIBRARIES AND NETCDF_C_LIBRARIES)
       list(APPEND NETCDF_CXX_LIBRARIES ${NETCDF_C_LIBRARIES})
    endif()
-endif()
\ No newline at end of file
+   if( NETCDF_CXX_INCLUDE_DIRECTORIES AND NETCDF_CXX_LIBRARIES )
+      set(NETCDF_CXX_FOUND 1)
+   endif()
+endif()
+
+if(NOT ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS)
+   set(${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS C)
+endif()
+
+unset(${CMAKE_FIND_PACKAGE_NAME}_REQUIRED_VARS)
+foreach(COMP C CXX Fortran)
+   if("${COMP}" IN_LIST ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS)
+      list(APPEND ${CMAKE_FIND_PACKAGE_NAME}_REQUIRED_VARS NETCDF_${COMP}_FOUND)
+   endif()
+endforeach()
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(${CMAKE_FIND_PACKAGE_NAME} HANDLE_COMPONENTS REQUIRED_VARS ${CMAKE_FIND_PACKAGE_NAME}_REQUIRED_VARS)
diff --git a/cmake/FindOASISYAC.cmake b/cmake/FindOASISYAC.cmake
new file mode 100644
index 000000000..b7927f62c
--- /dev/null
+++ b/cmake/FindOASISYAC.cmake
@@ -0,0 +1,19 @@
+find_path(OASIS_Fortran_INCLUDE_DIRECTORIES mod_oasis.mod HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile)
+find_library(OASIS_Fortran_LIBRARIES psmile HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile)
+
+find_path(MCT_Fortran_INCLUDE_DIRECTORIES mct_mod.mod HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile/mct)
+find_library(MCT_Fortran_LIBRARIES mct HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile/mct)
+
+find_path(MPEU_Fortran_INCLUDE_DIRECTORIES m_mpout.mod HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile/mct)
+find_library(MPEU_Fortran_LIBRARIES mpeu HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile/mct)
+
+find_path(SCRIP_Fortran_INCLUDE_DIRECTORIES remap_bicubic_reduced.mod HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile/scrip)
+find_library(SCRIP_Fortran_LIBRARIES scrip HINTS ${TOPLEVEL_DIR}/../oasis/build/lib/psmile/scrip)
+
+find_path(YAC_Fortran_INCLUDE_DIRECTORIES mo_yac_utils.mod HINTS ${TOPLEVEL_DIR}/../oasis/lib/yac/include)
+find_library(YACUTILS_Fortran_LIBRARIES yac_utils HINTS ${TOPLEVEL_DIR}/../oasis/lib/yac/lib)
+find_library(YACCORE_Fortran_LIBRARIES yac_core HINTS ${TOPLEVEL_DIR}/../oasis/lib/yac/lib)
+
+find_path(YAXT_Fortran_INCLUDE_DIRECTORIES yaxt.mod HINTS ${TOPLEVEL_DIR}/../oasis/lib/yaxt/include)
+find_library(YAXT_Fortran_LIBRARIES yaxt HINTS ${TOPLEVEL_DIR}/../oasis/lib/yaxt/lib)
+find_library(YAXTC_Fortran_LIBRARIES yaxt_c HINTS ${TOPLEVEL_DIR}/../oasis/lib/yaxt/lib)
diff --git a/config/namelist.cvmix b/config/namelist.cvmix
index 18a90c979..2bf06d887
--- a/config/namelist.cvmix
+++ b/config/namelist.cvmix
@@ -26,7 +26,9 @@ idemix_mu0 = 0.33333333 ! dissipation parameter (dimensionless)
 idemix_sforcusage = 0.2
 idemix_n_hor_iwe_prop_iter = 5 ! iterations for contribution from horiz. wave propagation
 idemix_surforc_file = '/work/ollie/clidyn/forcing/IDEMIX/fourier_smooth_2005_cfsr_inert_rgrid.nc'
+idemix_surforc_vname= 'var706'
 idemix_botforc_file = '/work/ollie/clidyn/forcing/IDEMIX/tidal_energy_gx1v6_20090205_rgrid.nc'
+idemix_botforc_vname= 'wave_dissipation'
 /
 ! namelist for PP
diff --git a/docs/getting_started/getting_started.rst b/docs/getting_started/getting_started.rst
index af6b00035..227fd00de
--- a/docs/getting_started/getting_started.rst
+++ b/docs/getting_started/getting_started.rst
@@ -8,25 +8,29 @@ This chapter describes several ways of getting started with FESOM2. First we sho
 TL;DR version for supported HPC systems
 =======================================
 
-Supported systems are: generic ``ubuntu``, ``ollie`` at AWI, ``mistral`` at DKRZ, ``JURECA`` at JSC, ``HLRN``, ``Hazel Hen``, ``Marinostrum 4`` at BSC. During configuration the system will be recognised and apropriate environment variables and compiler options should be used.
+Supported systems are: generic ``ubuntu``, ``albedo`` at AWI, ``levante`` at DKRZ, ``JURECA`` at JSC, ``HLRN``, ``Hazel Hen``, ``MareNostrum 4`` at BSC. During configuration the system will be recognised and appropriate environment variables and compiler options will be used.
+
 ::
 
     git clone https://github.com/FESOM/fesom2.git
     cd fesom2
+    git checkout refactoring
+    bash -l ./configure.sh
 
-Create file fesom.clock in the output directory with the following content (if you plan to run with COREII foring):
+Create file ``fesom.clock`` in the output directory with the following content (if you plan to run with COREII forcing):
 
 ::
 
-    0 1 1948
-    0 1 1948
+    0 1 1958
+    0 1 1958
+
+after that, one has to adjust the run script for the target system and run it:
 
-after that one has to adjust the run script for the target sustem and run it:
 ::
 
     cd work
-    sbatch job_ollie
+    sbatch job_albedo
+
 
 Detailed steps of compiling and runing the code
 ===============================================
@@ -47,17 +51,30 @@ Clone the GitHub repository with a git command:
 
     git clone https://github.com/FESOM/fesom2.git
 
-The repository contains model code and two additional libraries: `Metis` (domain partitioner) and `Parms` (solver), necessary to run FESOM2. To build FESOM2 executable one have to compile Parms library and the code of the model (`src` folder). In order to build executable that is used for model domain partitioning (distribution of the model mesh between CPUs) one have to compile `Metis` library and also some code located in the src directory (see :ref:`partitioning`). Building of the model executable and the partitioner is usually done automatically with the use of CMake. If you going to build the code not on one of the supported platforms (ollie, DKRZ, HLRN, and HAZELHEN, general Ubuntu), you might need to do some (usually small) modifications described in `Adding new platform for compilation`_ section.
+The repository contains model code and two additional libraries: `Metis` (domain partitioner) and `Parms` (solver), necessary to run FESOM2. To build the FESOM2 executable one has to compile the Parms library and the code of the model (`src` folder). In order to build the executable that is used for model domain partitioning (distribution of the model mesh between CPUs) one has to compile the `Metis` library and also some code located in the src directory (see :ref:`partitioning`). Building of the model executable and the partitioner is usually done automatically with the use of CMake. If you are going to build the code on a platform other than the supported ones (ollie, DKRZ, HLRN, HAZELHEN, and BSC, general Ubuntu), you might need to do some (usually small) modifications described in the `Adding new platform for compilation`_ section.
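+
+Note that ``configure.sh`` can also be given a platform name explicitly. With the ``env.sh`` changes in this PR, a compiler variant can additionally be appended after a ``+`` sign; the machine/compiler pair below is only an illustration and has to correspond to an existing ``env/<machine>/shell.<compiler>`` file:
+
+::
+
+    bash -l ./configure.sh levante+nvhpc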
 Change to the `fesom2` folder and execute:
 
 ::
 
+    cd fesom2
+    git checkout refactoring
+
+As a good practice, if one wants to make modifications to the source code or any of the files, it is advisable to create one's own branch off ``refactoring`` (the branch name below is just an example):
+
+::
+
+    git checkout -b my_branch
+
+After confirming that the right FESOM2 branch is being used, compile the model with:
+
+::
+
     bash -l ./configure.sh
 
 In the best case scenario, your platform will be recognized and the Parms library and model executable will be built and copied to the bin directory. If something went wrong have a look at Troubleshooting_ section.
 
-If you would like to select platform manually (which is nessesary in the case of Ubuntu, for eample), type:
+If you would like to select the platform manually (which is necessary in the case of Ubuntu, for example), type:
 
 ::
 
@@ -67,7 +84,7 @@
 Data and mesh files
 -------------------
 
-The FESOM2 repository contains only very small example meshes and data (in the ``test`` directory, see the note below). However, if you want to run realistic simulations, you ether have to have them on your system, or download an archive with sample data. THere is a chance that your system already have some of the necesseary files, you can check it in the ``setups/paths.yml`` file. If not, the easiest way to start is to download example set from `DKRZ cloud`_ (12 Gb) by executing:
+The FESOM2 repository contains only very small example meshes and data (in the ``test`` directory, see the note below). However, if you want to run realistic simulations, you either have to have them on your system or download an archive with sample data. There is a chance that your system already has some of the necessary files; you can check this in the ``setups/paths.yml`` file. If not, the easiest way to start is to download the example set from `DKRZ cloud`_ (12 Gb) by executing:
 
 ::
 
@@ -94,20 +111,34 @@ The ``input`` folder contains files with initial conditions (``phc3.0``) and atm
 
 Preparing the run
 ------------------
 
-You have to do several basic things in order to prepare the run. First, create a directory where results will be stored. Usually, it is created in the model root directory:
+You have to do several basic things in order to prepare the run.
+
+First, be aware of the files you need to modify according to your run configuration. Normally, those are:
+
+- ``namelist.config``: inside of the ``config`` folder. In this file you can set several configurations, such as the path to your mesh, climatology and results, as well as the run length, units and start year of your run.
+
+- ``namelist.forcing``: inside of the ``config`` folder. In this file you can set the path to your forcing files.
+
+- ``job_``: inside of the ``work`` folder. In this file you can set other important configurations, such as the time, tasks and tasks per node you allocate to your run.
+
+The exact changes necessary to those files are indicated later in this documentation. Before doing so, create a directory to store your output. Usually, it is created in the model root directory:
 
 ::
 
     mkdir results
 
-you might make a link to some other directory located on the part of the system where you have a lot of storage. In the results directory, you have to create ``fesom.clock`` file (NOTE, if you change ``runid`` in ``namelist.config`` to something like ``runid=mygreatrun``, the file will be named ``mygreatrun.clock``). Inside the file you have to put two identical lines:
+You might make a link to some other directory located on a part of the system where you have a lot of storage.
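+For example, assuming your scratch space lives under ``/scratch`` (the path is illustrative):
+
+::
+
+    mkdir /scratch/$USER/fesom_results
+    ln -s /scratch/$USER/fesom_results results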
+
+In your results directory, create a file named ``fesom.clock`` (NOTE: if you change ``runid`` in ``namelist.config`` to something like ``runid=mygreatrun``, the file will be named ``mygreatrun.clock``).
+
+Inside the file you have to put two identical lines:
 
 ::
 
     0 1 1958
     0 1 1958
 
-This is initial date of the model run, or the time of the `cold start` of your model. More detailed explanation of the clock file will be given in the `The clock file`_ section.
+This is the initial date of the model run, or the time of the `cold start` of your model. In case you want to start your run with a specific forcing from a specific year, substitute 1958 with the desired year. More detailed explanation of the clock file will be given in the `The clock file`_ section.
 
 The next step is to make some changes in the model configuration. All runtime options can be set in the namelists that are located in the config directory:
 
@@ -115,14 +146,41 @@
 
     cd ../config/
 
-There are several configuration files, but we are only interested in the ``namelist.config`` for now. The options that you might want to change for your first FESOM2 run are:
+As mentioned before, in this directory, you will normally have to change two files: ``namelist.config`` and ``namelist.forcing``. Both of these files ask for paths to initial conditions. Normally, these paths can be found under ``./setups/paths.yml``.
+
+Changing namelist.config
+========================
+
+In ``namelist.config``, the options that you might want to change for your first FESOM2 run are (an illustrative excerpt follows the list):
+
+- ``run_length``: length of the model run in run_length_unit (see below).
+
+- ``run_length_unit``: units of the run_length. Can be ``y`` (year), ``m`` (month), ``d`` (days), ``s`` (model steps).
+
+.. note:: you might need to adjust the requested wallclock time to the length of your run. In some setups and/or on some machines, if you set ``run_length`` to 10 and ``run_length_unit`` to ``y``, for example, the wallclock time needs to be enough for a 10-year run at once.
 
-- ``run_length`` length of the model run in run_length_unit (see below).
-- ``run_length_unit`` units of the run_length. Can be ``y`` (year), ``m`` (month), ``d`` (days), ``s`` (model steps).
-- ``MeshPath`` - path to the mesh you would like to use (e.g. ``/youdir/FESOM2_one_year_input/mesh/pi/``, slash at the end is important!)
-- ``ClimateDataPath`` - path to the folder with the file with model temperature and salinity initial conditions (e.g. ``/youdir/FESOM2_one_year_input/input/phc3.0/``). The name of the file with initial conditions is defined in `namelist.oce`, but during first runs you probably don't want to change it.
+- ``yearnew``: set this to the same year as in your ``fesom.clock`` file;
 
-More detailed explination of options in the ``namelist.config`` is in the section :ref:`chap_general_configuration`.
+- ``MeshPath``: path to the mesh you would like to use (e.g. ``/youdir/FESOM2_one_year_input/mesh/pi/``, slash at the end is important!);
+
+- ``ClimateDataPath``: path to the folder with the file with model temperature and salinity initial conditions (e.g. ``/youdir/FESOM2_one_year_input/input/phc3.0/``). The name of the file with initial conditions is defined in `namelist.oce`, but during first runs you probably don't want to change it;
+
+- ``ResultPath``: path to your results folder. The output of the model will be stored there.
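+
+A minimal illustrative excerpt of ``namelist.config`` with these options set (the namelist group names follow the current FESOM2 layout but may differ between versions, and all paths are placeholders):
+
+::
+
+    &timestep
+    run_length=1
+    run_length_unit='y'
+    /
+    &clockinit
+    yearnew=1958
+    /
+    &paths
+    MeshPath='/youdir/FESOM2_one_year_input/mesh/pi/'
+    ClimateDataPath='/youdir/FESOM2_one_year_input/input/phc3.0/'
+    ResultPath='/youdir/results/'
+    /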
+
+More detailed explanation of options in the ``namelist.config`` is in the section :ref:`chap_general_configuration`.
+
+Changing namelist.forcing
+=========================
+
+In ``namelist.forcing``, the options you need to change for your first FESOM2 run depend on the forcing you decide to use to initialize your experiment. Please note that the year you initialize your experiment with needs to be included in the forcing data files.
+
+In section ``&nam_sbc``, change the path of all the files to the path of the forcing you have chosen. For example, if you want to initialize your experiment with JRA55 forcing on ``levante``, the path for each file will be:
+
+::
+
+'/pool/data/AWICM/FESOM2/FORCING/JRA55-do-v1.4.0/'
+
+More detailed explanation of options in the ``namelist.forcing`` is in the section :ref:`chap_forcing_configuration`.
 
 Running the model
 -----------------
@@ -130,18 +188,28 @@
 Change to the ``work`` directory. You should find several batch scripts that are used to submit model jobs to different HPC machines. The scripts also link ``fesom.x`` executable to the ``work`` directory and copy namelists with configurations from config folder.
 
 .. note::
-    Model executable, namelists and job script have to be located in the same directory (usually ``work``).
+    Model executable, namelists and job script will be located in the same directory (usually ``work``).
 
-If you are working on AWI's ``ollie`` supercomputer, you have to use ``job_ollie``, in other case use the job script for your specific platform, or try to modify one of the existing ones.
+If you are working on AWI's ``albedo`` supercomputer, you have to use ``job_albedo``; otherwise use the job script for your specific platform, or try to modify one of the existing ones.
 
-.. note::
-    One thing you might need to adjust in the job files is the number of cores, you would like to run the model on. For example, for SLURM it will be adjusting ``#SBATCH --ntasks=288`` value, and for simple ``mpirun`` command, that we have for ``job_ubuntu`` it will be argument for the ``-n`` option. It is necessary, that your mesh has the corresponding partitioning (``dist_xx`` folder, where ``xx`` is the number of cores).
+In the job file, the required changes depend on the HPC system you are using. For ``levante``, you should adapt, for example (an illustrative header is sketched below, after the submission command):
+
+- ``#SBATCH --job-name``: name of your experiment; e.g. myexperiment_001;
 
-On ``ollie`` the submission of your job is done by executing the following command:
+- ``#SBATCH --ntasks-per-node``: number of cores per node. The total number of tasks has to be divisible by this number. If you choose ``ntasks``/4, for example, you will run your experiment on 4 nodes;
+
+- ``#SBATCH --ntasks``: number of cores. This number has to match your mesh partitioning. It is the ``xx`` number in your ``dist_xx`` mesh folder;
+
+- ``#SBATCH --time``: be generous with your run time, in case you are running a longer simulation and the job is not being resubmitted after each time step;
+
+- ``#SBATCH -A ``: define your project account.
+
+
+On ``levante`` the submission of your job is done by executing the following command:
 
 ::
 
-    sbatch job_ollie
+    sbatch job_levante
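+
+For reference, the corresponding part of a SLURM header could look like this (all values are placeholders and have to match your account, machine and mesh partitioning):
+
+::
+
+    #SBATCH --job-name=myexperiment_001
+    #SBATCH --ntasks=288
+    #SBATCH --ntasks-per-node=72
+    #SBATCH --time=08:00:00
+    #SBATCH -A myaccount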
 
@@ -149,7 +217,22 @@ The job is then submitted. In order to check the status of your job on ollie you
 
     squeue -u yourusername
 
-Results of the model run should appear in the ``results`` directory that you have specified in the ``namelist.config``. After the run is finished the ``fesom.clock`` file (or if you change your runid, ``runid.clock``) will be updated with information about the time of your run's end, that allows running the next time portion of the model experiment by just resubmitting the job with ``sbatch job_ollie``.
+The output of the model run should appear in the ``results`` directory that you have specified in the ``namelist.config``. After the run is finished, the ``fesom.clock`` file (or, if you changed your runid, ``runid.clock``) will be updated with the end time of your run, which allows running the next portion of the model experiment by simply resubmitting the job.
+
+Some files will also be stored in the work folder. These are:
+
+- A file containing information about errors during job preparation and submission, usually containing ``err.out`` in its name;
+
+- A file containing information about the job itself, such as duration, folders, etc., usually containing ``out.out`` in its name;
+
+- A file containing information about the simulation, usually called ``fesom2-0.out``;
+
+- A binary file ``fesom.x`` specific to that simulation;
+
+- A copy of the namelists used to define the configurations of your run.
+
+In case your simulation crashes, the job error file or ``fesom2-0.out`` usually contain valuable information to either fix the issue causing the crash or to give the developers an idea of what can be done to help you.
+
 
 Other things you need to know earlier on
 ========================================
@@ -378,12 +461,12 @@ The best way to run the model locally is to use Docker container. You obviously
 
 - Get the image::
 
-    docker pull koldunovn/fesom2_test:fesom2.1
+    docker pull koldunovn/fesom2_test:refactoring2
 
 - Go to the folder with your version of fesom2 folder (NOT inside fesom2 folder, one up, the one you run ``git clone https://github.com/FESOM/fesom2.git`` in).
 - Run::
 
-    docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:fesom2.1 /bin/bash
+    docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:refactoring2 /bin/bash
 
 - This should get you inside the container. You now can edit the files in your fesom2 folder (on host system), but run compule and run the model inside the container.
 - When inside the container, to compile do:
 
@@ -391,6 +474,7 @@
 
 ::
 
     cd fesom2
+    git checkout refactoring
     bash -l configure.sh ubuntu
 
 - To prepare the run (this will do the test with pi mesh)::
diff --git a/docs/index.rst b/docs/index.rst
index 8dfe72d41..28af8b1fb
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -6,6 +6,15 @@
 FESOM2 documentation
 ====================
 
+The Finite volumE Sea Ice-Ocean Model (FESOM2) is a multi-resolution
+ocean general circulation model that solves the equations of motion
+describing the ocean and sea ice using finite-volume methods on
+unstructured computational grids. The model is developed and supported
+by researchers at the Alfred Wegener Institute, Helmholtz Centre for
+Polar and Marine Research (AWI), in Bremerhaven, Germany.
+ Authors ------- diff --git a/docs/requirements.txt b/docs/requirements.txt index d4b6b966a..302b80753 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,3 @@ sphinx==4.1.2 sphinxcontrib-bibtex==2.3.0 +sphinx_rtd_theme diff --git a/env.sh b/env.sh index a073ad208..5ac127a22 100755 --- a/env.sh +++ b/env.sh @@ -32,8 +32,17 @@ if [[ -z "$1" ]] || [[ "$1" =~ ^- ]]; then # no argument given LOGINHOST="$(hostname -f)" else - LOGINHOST=$1 # 1st arg exists and doesn't start with -, meaning it is machine specification + MACHINESPEC=$1 # 1st arg exists and doesn't start with -, meaning it is machine specification shift # pop the argument as we already stored it, remaining arguments are passed to cmake + + # check if given machine spec has + in it. if so save the later part as compilerid + if [[ $MACHINESPEC == *"+"* ]]; then + LOGINHOST="${MACHINESPEC%%+*}" # Everything before the '+' + COMPILERID="${MACHINESPEC#*+}" # Everything after the '+' + else + LOGINHOST="$MACHINESPEC" + COMPILERID="" + fi fi @@ -41,11 +50,6 @@ if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then STRATEGY="mistral.dkrz.de" elif [[ $LOGINHOST =~ ^levante ]] || [[ $LOGINHOST =~ ^l[:alnum:]+\.lvt\.dkrz\.de$ ]]; then STRATEGY="levante.dkrz.de" - # following regex only matches if input is 2 word like levante.nvhpc, this enables using different shells for a machine directly - compid_regex="^([[:alnum:]]+)\.([[:alnum:]]+)$" - if [[ $LOGINHOST =~ $compid_regex ]]; then - COMPILERID="${BASH_REMATCH[2]}" - fi elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then STRATEGY="ollie" elif [[ $LOGINHOST =~ ^albedo[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then @@ -58,9 +62,9 @@ elif [[ $LOGINHOST =~ \.hww\.de$ ]] || [[ $LOGINHOST =~ ^nid[0-9]{5}$ ]]; then STRATEGY="hazelhen.hww.de" elif [[ $LOGINHOST =~ \.jureca$ ]]; then STRATEGY="jureca" -elif [[ $LOGINHOST = ubuntu ]]; then +elif [[ $LOGINHOST =~ ^ubuntu ]]; then STRATEGY="ubuntu" -elif [[ $LOGINHOST = bsc ]]; then +elif [[ $LOGINHOST =~ ^bsc ]]; then STRATEGY="bsc" elif [[ $LOGINHOST =~ ^juwels[0-9][0-9].ib.juwels.fzj.de$ ]]; then STRATEGY="juwels" @@ -94,6 +98,7 @@ else export FESOM_PLATFORM_STRATEGY=$STRATEGY SHELLFILE="${DIR}/env/${STRATEGY}/shell" if [[ -n ${COMPILERID} ]]; then + echo "Compiler ID for shell is: ${COMPILERID}" SHELLFILE="${SHELLFILE}.${COMPILERID}" fi if [[ ! 
-e ${SHELLFILE} ]]; then diff --git a/env/atosecmwf/shell b/env/atosecmwf/shell index 90f118607..97cd921a7 100644 --- a/env/atosecmwf/shell +++ b/env/atosecmwf/shell @@ -33,7 +33,5 @@ module load cmake/3.20.2 module load ninja/1.10.0 module load fcm/2019.05.0 -export NETCDF_DIR=$NETCDF4_DIR +export FC=ifort CC=icc CXX=icpc -export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly -#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly diff --git a/env/lumi/shell b/env/lumi/shell index 40a2cb047..aaf0f589c 100644 --- a/env/lumi/shell +++ b/env/lumi/shell @@ -11,10 +11,10 @@ module load cray-netcdf/4.8.1.5 export FC=ftn export CC=cc export CXX=cc -export NETCDF_Fortran_INCLUDE_DIRECTORIES=$CRAY_NETCDF_DIR/include -export NETCDF_C_INCLUDE_DIRECTORIES=$CRAY_NETCDF_DIR/include -export NETCDF_C_LIBRARIES=$CRAY_NETCDF_DIR/lib -export NETCDF_Fortran_LIBRARIES=$CRAY_NETCDF_DIR/lib +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=$CRAY_NETCDF_DIR/include +#export NETCDF_C_INCLUDE_DIRECTORIES=$CRAY_NETCDF_DIR/include +#export NETCDF_C_LIBRARIES=$CRAY_NETCDF_DIR/lib +#export NETCDF_Fortran_LIBRARIES=$CRAY_NETCDF_DIR/lib $CC -v $FC -V $CXX -v diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index 3cb7b48cf..7f85d5eef 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -6,7 +6,10 @@ project(parms C) set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt file(GLOB all_sources ${src_home}/src/*.c ${src_home}/src/DDPQ/*.c) -include("${CMAKE_CURRENT_LIST_DIR}/../../cmake/FindBLAS.cmake") +find_package(BLAS) # standard way to find blas +if( NOT BLAS_FOUND) # try older way to find blas + include("${CMAKE_CURRENT_LIST_DIR}/../../cmake/FindBLAS.cmake") +endif() # create our library (set its name to name of this project) if(${BUILD_FESOM_AS_LIBRARY}) @@ -14,14 +17,24 @@ if(${BUILD_FESOM_AS_LIBRARY}) else() add_library(${PROJECT_NAME} ${all_sources}) endif() -target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS USE_MPI REAL=double DBL FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 HAS_BLAS) + +if(${BLAS_FOUND}) + message("BLAS FOUND ---------------------------") + target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS USE_MPI REAL=double DBL FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 HAS_BLAS) + target_link_libraries(${PROJECT_NAME} INTERFACE ${BLAS_C_LIBRARIES} $ENV{UBUNTU_BLAS_LIBRARY}) +else() + message("BLAS NOT FOUND ***********************") + target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS USE_MPI REAL=double DBL FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8) + target_link_libraries(${PROJECT_NAME} INTERFACE $ENV{UBUNTU_BLAS_LIBRARY}) +endif() + target_include_directories(${PROJECT_NAME} PRIVATE ${src_home}/src/../include ${src_home}/src/include INTERFACE ${src_home}/src/../include ) -target_link_libraries(${PROJECT_NAME} INTERFACE ${BLAS_C_LIBRARIES} $ENV{UBUNTU_BLAS_LIBRARY}) if(${CMAKE_C_COMPILER_ID} STREQUAL "Intel") - target_compile_options(${PROJECT_NAME} PRIVATE -no-prec-div -no-prec-sqrt -fast-transcendentals -fp-model precise) + target_compile_options(${PROJECT_NAME} PRIVATE -no-prec-div -no-prec-sqrt -fast-transcendentals -fp-model precise) + if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de ) target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -mtune=core-avx2) endif() diff --git a/lib/parms/src/DDPQ/arms2.c b/lib/parms/src/DDPQ/arms2.c index d7a4ce88d..5dad20138 100755 --- a/lib/parms/src/DDPQ/arms2.c +++ b/lib/parms/src/DDPQ/arms2.c 
@@ -227,14 +227,15 @@ static int parms_arms_getssize_vcsr(parms_Operator self) } static struct parms_Operator_ops parms_arms_sol_vptr = { - parms_arms_sol_vcsr, - parms_arms_lsol_vcsr, - parms_arms_invs_vcsr, - parms_arms_ascend_vcsr, - parms_arms_getssize_vcsr, - parms_arms_nnz, - arms_free_vcsr, - arms_view_vcsr + parms_arms_sol_vcsr, /* apply */ + parms_arms_lsol_vcsr, /* lsol */ + parms_arms_invs_vcsr, /* invs */ + NULL, /* getu !!! WARNING, UNASSIGNED !!! */ + parms_arms_ascend_vcsr, /* ascend */ + parms_arms_getssize_vcsr, /* getssize */ + parms_arms_nnz, /* getnnz */ + arms_free_vcsr, /* operator_free */ + arms_view_vcsr /* operator_view */ }; diff --git a/lib/parms/src/DDPQ/sets.c b/lib/parms/src/DDPQ/sets.c index 1cfb68b68..1ccf3adb3 100755 --- a/lib/parms/src/DDPQ/sets.c +++ b/lib/parms/src/DDPQ/sets.c @@ -11,7 +11,7 @@ void parms_errexit( char *f_str, ... ) { va_list argp; - char out1[256], out2[256]; + char out1[256], out2[512]; va_start(argp, f_str); vsprintf(out1, f_str, argp); @@ -19,7 +19,7 @@ void parms_errexit( char *f_str, ... ) sprintf(out2, "Error! %s\n", out1); - fprintf(stdout, out2); + fprintf(stdout, "%s", out2); fflush(stdout); exit( -1 ); diff --git a/lib/parms/src/parms_ilu_vcsr.c b/lib/parms/src/parms_ilu_vcsr.c index 116aa51ba..17169e210 100755 --- a/lib/parms/src/parms_ilu_vcsr.c +++ b/lib/parms/src/parms_ilu_vcsr.c @@ -1683,14 +1683,15 @@ int parms_ilu_update(parms_Mat self, parms_FactParam param, void *mat, if(ABS_VALUE(t1) < DBL_EPSILON) continue; if( ii+start < schur_start ){ - for(jj = 1; jj < nnz; jj++) { - iw = jw[rowjj[jj]]; - if(iw != -1) - if(iw < ii+start) - rowm[iw] -= t1*rowmm[jj]; - else - data->U->pa[ii+start][iw-ii-start] -= t1*rowmm[jj]; - } + for(jj = 1; jj < nnz; jj++) { + iw = jw[rowjj[jj]]; + if(iw != -1) { + if(iw < ii+start) + rowm[iw] -= t1*rowmm[jj]; + else + data->U->pa[ii+start][iw-ii-start] -= t1*rowmm[jj]; + } + } } else { for(jj = 1; jj < nnz; jj++){ diff --git a/lib/parms/src/parms_pc_schurras.c b/lib/parms/src/parms_pc_schurras.c index 979acdfe9..505dfc1c2 100755 --- a/lib/parms/src/parms_pc_schurras.c +++ b/lib/parms/src/parms_pc_schurras.c @@ -10,6 +10,11 @@ #endif #endif +/* Forward declarations */ +int parms_OperatorGetU(parms_Operator self, void **mat); +int parms_MatGetOffDiag(parms_Mat self, void **mat); +int parms_CommGetOdvlist(parms_Comm self, int **odvlist); + typedef struct schurras_data { parms_Operator op_out,op_in; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 79307ff0b..62049b853 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,6 +2,9 @@ cmake_minimum_required(VERSION 3.9) project(fesom C Fortran) +find_package(MPI REQUIRED) + +#TODO: these machine specific changes must be done using cmake toolchain then here otherwise will make cmake another raps. if(DEFINED ENV{FESOM_PLATFORM_STRATEGY}) set(FESOM_PLATFORM_STRATEGY $ENV{FESOM_PLATFORM_STRATEGY} CACHE STRING "switch to platform specific compile settings, this is usually determined via the env.sh script") else() @@ -19,6 +22,7 @@ else() option(ALBEDO_INTELMPI_WORKAROUNDS "workaround for performance issues on albedo" OFF) endif() +#TODO: these machine specific changes must be done using cmake toolchain then here otherwise will make cmake another raps. if(ALEPH_CRAYMPICH_WORKAROUNDS) # todo: enable these options only for our targets @@ -35,6 +39,7 @@ if(ALBEDO_INTELMPI_WORKAROUNDS) add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) endif() +#TODO: this machine specific block can easyly go in cmake toolchain. 
 if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de )
    message(STATUS "multithreading disabled for Levante")
    # multithreading suddenly produces an error, disable it until a fix is found. issue #413
    option(DISABLE_MULTITHREADING "disable asynchronous operations" ON)
@@ -66,15 +71,21 @@ if(${USE_ICEPACK})
    file(GLOB sources_Fortran ${src_home}/*.F90
                              ${src_home}/icepack_drivers/*.F90
                              ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90)
-elseif(${BUILD_FESOM_AS_LIBRARY})
+elseif(${BUILD_FESOM_AS_LIBRARY}) # TODO: assumes multio is always on
    file(GLOB sources_Fortran ${src_home}/*.F90 ${src_home}/ifs_interface/*.F90) # ICEPACK + LIBRARY NOT SUPPORTED (YET)
 else()
    file(GLOB sources_Fortran ${src_home}/*.F90)
 endif()
+
+if(${USE_MULTIO})
+   list(APPEND sources_Fortran ${src_home}/ifs_interface/iom.F90 ${src_home}/ifs_interface/mpp_io.F90)
+endif()
+
+
 #list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90)
 file(GLOB sources_C ${src_home}/*.c)
-list(REMOVE_ITEM sources_C ${src_home}/psolve_feom.c) # does the file still exist?
+#list(REMOVE_ITEM sources_C ${src_home}/psolve_feom.c) # this file doesn't exist! please check and remove this
 
 # generate a custom file from fesom_version_info.F90 which includes the current git SHA
 set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90)
@@ -101,7 +112,10 @@
 add_subdirectory(../lib/parms ${PROJECT_BINARY_DIR}/parms)
 
 add_subdirectory(async_threads_cpp)
 
-include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake)
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/../cmake)
+
+find_package(NETCDF REQUIRED)
+#include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake)
 
 if(${BUILD_FESOM_AS_LIBRARY})
    add_library(${PROJECT_NAME}_C ${sources_C})
@@ -121,11 +135,21 @@
 else()
    add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fesom_main.F90)
 endif()
 target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243)
+if(${USE_MULTIO})
+   target_compile_definitions(${PROJECT_NAME} PRIVATE __MULTIO)
+endif()
 if(${DISABLE_MULTITHREADING})
    target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING)
 endif()
 if(${FESOM_COUPLED})
-   include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake)
+   message(STATUS "FESOM is coupled")
+   if(${OASIS_WITH_YAC})
+      message(STATUS "Looking for OASIS with YAC lib")
+      include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASISYAC.cmake)
+   else()
+      message(STATUS "Looking for OASIS")
+      include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake)
+   endif()
    target_compile_definitions(${PROJECT_NAME} PRIVATE __oasis)
 endif()
 if(${OIFS_COUPLED})
@@ -146,6 +170,7 @@ endif()
 
 # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g.
mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) if(${BUILD_FESOM_AS_LIBRARY}) + # TODO: not the best idea to use -xHost will stop from cross compiling tests in CI target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin -fpe0) # add -fpe0 for RAPS environment else() target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -ip -init=zero -no-wrap-margin) @@ -193,15 +218,31 @@ elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL NVHPC ) endif() endif() if(${BUILD_FESOM_AS_LIBRARY}) + + set(CMAKE_POSITION_INDEPENDENT_CODE ON) # not sure below are needed anymore target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) target_compile_options(${PROJECT_NAME}_C PRIVATE -fPIC) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) +target_include_directories(${PROJECT_NAME} PRIVATE ${YAC_Fortran_INCLUDE_DIRECTORIES} ${YAXT_Fortran_INCLUDE_DIRECTORIES}) target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES} ${OASIS_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) + +# OASIS-YAK +target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${YACUTILS_Fortran_LIBRARIES} ${YACCORE_Fortran_LIBRARIES}) +target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${YAXT_Fortran_LIBRARIES} ${YAXTC_Fortran_LIBRARIES}) + +# MULTIO +if(${USE_MULTIO}) + find_package(MULTIO REQUIRED) + target_link_libraries(${PROJECT_NAME} ${MULTIO_LIBRARIES}) + target_include_directories(${PROJECT_NAME} PRIVATE ${MULTIO_INCLUDE_DIRS}) +endif() + target_link_libraries(${PROJECT_NAME} async_threads_cpp) + set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) if(${ENABLE_OPENMP} AND NOT ${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray) target_compile_options(${PROJECT_NAME} PRIVATE ${OpenMP_Fortran_FLAGS}) # currently we only have OpenMP in the Fortran part @@ -209,13 +250,18 @@ if(${ENABLE_OPENMP} AND NOT ${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray) endif() + set(FESOM_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/.." 
CACHE FILEPATH "directory where FESOM will be installed to via 'make install'") if(${BUILD_FESOM_AS_LIBRARY}) install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") install(TARGETS ${PROJECT_NAME}_C DESTINATION "${FESOM_INSTALL_PREFIX}/lib") + # below may be needed if we want to use fesom in ec-bundle + #install(TARGETS ${PROJECT_NAME} EXPORT ifs_sp-targets DESTINATION "${FESOM_INSTALL_PREFIX}/lib") + #install(TARGETS ${PROJECT_NAME}_C EXPORT ifs_sp-targets DESTINATION "${FESOM_INSTALL_PREFIX}/lib") + else() set(FESOM_INSTALL_FILEPATH "${FESOM_INSTALL_PREFIX}/bin/fesom.x") -get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) -get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) -install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) + get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) + get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) + install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) endif() diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 index 5c8a598af..3a76761bf 100644 --- a/src/MOD_PARTIT.F90 +++ b/src/MOD_PARTIT.F90 @@ -47,10 +47,10 @@ module MOD_PARTIT integer :: eDim_nod2D integer, allocatable, dimension(:) :: myList_nod2D - integer :: myDim_elem2D + integer :: myDim_elem2D, myDim_elem2D_shrinked integer :: eDim_elem2D integer :: eXDim_elem2D - integer, allocatable, dimension(:) :: myList_elem2D + integer, allocatable, dimension(:) :: myList_elem2D, myInd_elem2D_shrinked integer :: myDim_edge2D integer :: eDim_edge2D @@ -58,6 +58,7 @@ module MOD_PARTIT integer :: pe_status = 0 ! if /=0 then something is wrong integer :: MPI_COMM_FESOM ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) + integer :: MPI_COMM_WORLD ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) ! MPI Datatypes for interface exchange ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values) @@ -75,6 +76,7 @@ module MOD_PARTIT integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:) integer :: MPIERR + !!! remPtr_* are constructed during the runtime and shall not be dumped!!! integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 old mode 100755 new mode 100644 index 9e2829ae2..91f2225d9 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -2,16 +2,13 @@ module cpl_driver !====================================================================== ! - ! for coupling between the FESOM and an AOGCM using OASIS3-MCT + ! for coupling between the FESOM ocean ECHAM6 atmosphere using OASIS3-MCT ! !===================================================================== ! History : ! 09-09 (R. Redler, Germany) Original code ! 09-09 (K.Fieg, AWI Germany) Adjustment for FESOM ! 07-12 (D.Barbi, AWI Germany) Switch to ECHAM6.1 and OASIS3-MCT - ! 01-19 (J.Streffing, AWI Germany) Added OpenIFS coupling - ! 03-23 (J.Streffing, AWI Germany) Added corner point computation - ! for 1st order conserv remapping !---------------------------------------------------------------------- ! Modules used ! 
@@ -94,6 +91,217 @@ module cpl_driver contains + subroutine node_contours(my_x_corners, my_y_corners, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_PARAM + use g_comm_auto + use o_ARRAYS + use g_rotate_grid + + IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), allocatable, intent(inout) :: my_x_corners(:,:) ! longitude node corners + real(kind=WP), allocatable, intent(inout) :: my_y_corners(:,:) ! latitude node corners + integer :: bEdge_left, bEdge_right + integer, dimension(2) :: belem_left, belem_right + integer :: edge_left, edge_right + integer :: n, ee, elem, nn, el(2), flag, nn1, nn2 + integer :: current_pos + integer :: pos_increment=-1 ! counter clockwise is negative, otherwise +1! + integer, allocatable, dimension(:) :: nedges, nelems, nedges1, nelems1, nedges2, nelems2 + real(kind=WP) :: this_x_coord, this_y_coord + +include "associate_part_def.h" +include "associate_mesh_def.h" +include "associate_part_ass.h" +include "associate_mesh_ass.h" + + if (.not. allocated(my_x_corners)) then + ALLOCATE(my_x_corners(myDim_nod2D, 25)) !maxval(nod_in_elem2D_num, 1)*2+2)) + endif + if (.not. allocated(my_y_corners)) then + ALLOCATE(my_y_corners(myDim_nod2D, 25)) !maxval(nod_in_elem2D_num, 1)*2+2)) + endif + do n=1, myDim_nod2D + ! find the type/of node: internal or at boundary + bEdge_left =0 + belem_left =0 + bEdge_right=0 + belem_right=0 + + do ee=1, nod_in_elem2D_num(n) + elem=nod_in_elem2D(ee,n) + if (elem2D_nodes(1,elem)==n) then + edge_left=elem_edges(3,elem) + edge_right=elem_edges(2,elem) + elseif (elem2D_nodes(2,elem)==n) then + edge_left=elem_edges(1,elem) + edge_right=elem_edges(3,elem) + else + edge_left=elem_edges(2,elem) + edge_right=elem_edges(1,elem) + end if + if (myList_edge2D(edge_left)>edge2D_in) then + bEdge_left=bEdge_left+1 + belem_left(bEdge_left)=elem + end if + if (myList_edge2D(edge_right)>edge2D_in) then + bEdge_right=bEdge_right+1 + belem_right(bEdge_right)=elem + end if + end do + + ! now we have three cases + if (bEdge_left==0) then ! inner contour + elem=nod_in_elem2D(1, n) ! we can start from any + allocate(nedges(nod_in_elem2D_num(n))) + nedges=0 + allocate(nelems(nod_in_elem2D_num(n))) + nelems=0 + !!!!!!! inner_node_contour +include "node_contour_inner.h" + if (pos_increment<0) then + current_pos=2*nod_in_elem2D_num(n) + else + current_pos =1 + end if + do nn=1, nod_in_elem2D_num(n) + call edge_center(edges(1, nedges(nn)), edges(2, nedges(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + call elem_center(nelems(nn), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + end do + current_pos=2*nod_in_elem2D_num(n)+1 + do nn=current_pos, size(my_x_corners, 2) + my_x_corners(n, nn)=my_x_corners(n, current_pos-1) + my_y_corners(n, nn)=my_y_corners(n, current_pos-1) + end do + deallocate(nedges, nelems) + end if + + + if (bEdge_left==1) then ! 
standard boundary node + elem=belem_left(1) + allocate(nedges(nod_in_elem2D_num(n)+1)) + nedges=0 + allocate(nelems(nod_in_elem2D_num(n))) + nelems=0 + !!!!!!!boundary_node_contour +include "node_contour_boundary.h" + if (pos_increment<0) then + current_pos=2*nod_in_elem2D_num(n)+2 !one more for the node n itself also we have 2 boundary edges + else + current_pos =1 + end if + do nn=1, nod_in_elem2D_num(n) + call edge_center(edges(1, nedges(nn)), edges(2, nedges(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + call elem_center(nelems(nn), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + end do + nn=nod_in_elem2D_num(n)+1 + call edge_center(edges(1, nedges(nn)), edges(2, nedges(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + my_x_corners(n, current_pos)=coord_nod2D(1,n) + my_y_corners(n, current_pos)=coord_nod2D(2,n) + current_pos=2*nod_in_elem2D_num(n)+3 + do nn=current_pos, size(my_x_corners, 2) + my_x_corners(n, nn)=my_x_corners(n, current_pos-1) + my_y_corners(n, nn)=my_y_corners(n, current_pos-1) + end do + !!!!!!! + deallocate(nedges, nelems) + end if + + if (bEdge_left==2) then ! strange boundary node + elem=belem_left(1) + allocate(nedges (nod_in_elem2D_num(n)+1)) + allocate(nedges1(nod_in_elem2D_num(n)+1)) + nedges =0 + nedges1=0 + allocate(nelems (nod_in_elem2D_num(n))) + allocate(nelems1(nod_in_elem2D_num(n))) + nelems=0 + nelems1=0 + if (pos_increment<0) then + current_pos=2*nod_in_elem2D_num(n)+4 !two more for the node n itself also we have 4 boundary edges + else + current_pos =1 + end if + !!!!!!!boundary_node_contour +include "node_contour_boundary.h" + where (nedges>0) + nedges1=nedges + end where + where (nelems>0) + nelems1=nelems + end where + nn1=nn + do nn=1, nn1 + call edge_center(edges(1, nedges1(nn)), edges(2, nedges1(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + call elem_center(nelems1(nn), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + end do + nn=nn1+1 + call edge_center(edges(1, nedges1(nn)), edges(2, nedges1(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + nn=nn1+2 + my_x_corners(n, current_pos)=coord_nod2D(1,n) + my_y_corners(n, current_pos)=coord_nod2D(2,n) + current_pos=current_pos+pos_increment + !!!!!!! 
+ elem=belem_left(2) + allocate(nedges2(nod_in_elem2D_num(n)+1)) + nedges =0 + nedges2=0 + allocate(nelems2(nod_in_elem2D_num(n))) + nelems =0 + nelems2=0 + !!!!!!!boundary_node_contour +include "node_contour_boundary.h" + where (nedges>0) + nedges2=nedges + end where + where (nelems>0) + nelems2=nelems + end where + nn2=nn + do nn=nn1+3, nn1+nn2+2 + call edge_center(edges(1, nedges2(nn)), edges(2, nedges2(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + call elem_center(nelems2(nn), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + end do + nn=nn1+nn2+3 + call edge_center(edges(1, nedges2(nn)), edges(2, nedges2(nn)), my_x_corners(n, current_pos), my_y_corners(n, current_pos), mesh) + current_pos=current_pos+pos_increment + nn=nn1+nn2+4 + my_x_corners(n, nn)=coord_nod2D(1,n) + my_y_corners(n, nn)=coord_nod2D(2,n) + current_pos=2*nod_in_elem2D_num(n)+5 + do nn=current_pos, size(my_x_corners, 2) + my_x_corners(n, nn)=my_x_corners(n, current_pos-1) + my_y_corners(n, nn)=my_y_corners(n, current_pos-1) + end do + !!!!!!! + deallocate(nedges, nelems, nedges1, nelems1, nedges2, nelems2) + end if + end do + do n=1, myDim_nod2D + do nn=1, size(my_x_corners, 2) + this_x_coord=my_x_corners(n, nn) + this_y_coord=my_y_corners(n, nn) + call r2g(my_x_corners(n, nn), my_y_corners(n, nn), this_x_coord, this_y_coord) + end do + end do + my_x_corners=my_x_corners/rad + my_y_corners=my_y_corners/rad + end subroutine node_contours + subroutine cpl_oasis3mct_init(partit, localCommunicator ) USE MOD_PARTIT implicit none @@ -208,8 +416,8 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) integer :: il_flag logical :: new_points - integer :: i, j, k ! local loop indicees - integer :: l,m,n, done ! local loop indicees + integer :: i, j, k ! local loop indicees + integer :: l,m ! local loop indicees character(len=32) :: point_name ! name of the grid points @@ -218,46 +426,29 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) integer :: counts_from_all_pes(partit%npes) integer :: displs_from_all_pes(partit%npes) integer :: my_displacement - integer :: my_max_elem(partit%npes) - integer :: my_max_edge(partit%npes) - integer :: all_max_elem, all_max_edge, n_neg, n_pos - integer :: el(2), enodes(2), edge - - integer,allocatable :: unstr_mask(:,:), coastal_edge_list(:,:) - real(kind=WP) :: max_x ! max longitude on corners of control volume - real(kind=WP) :: min_x ! min longitude on corners of control volume - real(kind=WP) :: temp ! temp storage for corner sorting + + integer,allocatable :: unstr_mask(:,:) real(kind=WP) :: this_x_coord ! longitude coordinates real(kind=WP) :: this_y_coord ! latitude coordinates - real(kind=WP) :: this_x_corners ! longitude node corners - real(kind=WP) :: this_y_corners ! latitude node corners ! ! Corner data structure for a OASIS3-MCT Reglonlatvrt grid ! - real(kind=WP), allocatable :: pos_x(:) ! longitude to the right of dateline - real(kind=WP), allocatable :: pos_y(:) ! latitude to the right of dateline - real(kind=WP), allocatable :: neg_x(:) ! longitude to the left of dateline - real(kind=WP), allocatable :: neg_y(:) ! latitude to the left of dateline - real(kind=WP), allocatable :: temp_x_coord(:) ! longitude coordinates - real(kind=WP), allocatable :: temp_y_coord(:) ! longitude coordinates real(kind=WP), allocatable :: my_x_coords(:) ! longitude coordinates real(kind=WP), allocatable :: my_y_coords(:) ! 
latitude coordinates - real(kind=WP), allocatable :: angle(:,:) ! array for holding corner angle for sorting - real(kind=WP), allocatable :: my_x_corners(:,:) ! longitude node corners - real(kind=WP), allocatable :: my_y_corners(:,:) ! latitude node corners - real(kind=WP), allocatable :: coord_e_edge_center(:,:,:) ! edge center coords + real(kind=WP), allocatable :: all_x_coords(:, :) ! longitude coordinates real(kind=WP), allocatable :: all_y_coords(:, :) ! latitude coordinates - real(kind=WP), allocatable :: all_x_corners(:,:,:) ! longitude node corners - real(kind=WP), allocatable :: all_y_corners(:,:,:) ! latitude node corners real(kind=WP), allocatable :: all_area(:,:) - logical, allocatable :: coastal_nodes(:) + real(kind=WP), allocatable :: my_x_corners(:,:) ! local longitude node corners + real(kind=WP), allocatable :: my_y_corners(:,:) ! local latitude node corners + real(kind=WP), allocatable :: all_x_corners(:,:,:) ! global longitude node corners + real(kind=WP), allocatable :: all_y_corners(:,:,:) ! global latitude node corners -#include "associate_part_def.h" -#include "associate_mesh_def.h" -#include "associate_part_ass.h" -#include "associate_mesh_ass.h" +include "associate_part_def.h" +include "associate_mesh_def.h" +include "associate_part_ass.h" +include "associate_mesh_ass.h" #ifdef VERBOSE @@ -307,39 +498,13 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) my_displacement = SUM(counts_from_all_pes(1:mype)) endif - CALL MPI_BARRIER(MPI_COMM_FESOM, ierror) - - my_max_elem=0 - my_max_elem = maxval(nod_in_elem2D_num(1:myDim_nod2D)) - all_max_elem = 0 - call MPI_Allreduce(my_max_elem, all_max_elem, & - 1, MPI_INTEGER, MPI_MAX, & - MPI_COMM_FESOM, MPIerr) - - my_max_edge=0 - my_max_edge=maxval(nn_num) - all_max_edge=0 - call MPI_AllREDUCE( my_max_edge, all_max_edge, & - 1, MPI_INTEGER,MPI_MAX, & - MPI_COMM_FESOM, MPIerr) - - CALL MPI_BARRIER(MPI_COMM_FESOM, ierror) - - if (mype .eq. 0) then - print *, 'Max elements per node:', all_max_elem, 'Max edges per node:', all_max_edge - print *, 'FESOM before def partition' - endif - ig_paral(1) = 1 ! Apple Partition ig_paral(2) = my_displacement ! Global Offset ig_paral(3) = my_number_of_points ! Local Extent - ! For MPI_GATHERV we need the location of the local segment in the global vector - displs_from_all_pes(1) = 0 - do i = 2, npes - displs_from_all_pes(i) = SUM(counts_from_all_pes(1:(i-1))) - enddo - + if (mype .eq. 0) then + print *, 'FESOM before def partition' + endif CALL oasis_def_partition( part_id(1), ig_paral, ierror ) if (mype .eq. 0) then print *, 'FESOM after def partition' @@ -348,282 +513,50 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) print *, 'FESOM commRank def_partition failed' call oasis_abort(comp_id, 'cpl_oasis3mct_define_unstr', 'def_partition failed') endif - - ALLOCATE(coastal_nodes(number_of_all_points)) - ALLOCATE(angle(my_number_of_points,all_max_elem+all_max_edge)) - ALLOCATE(my_x_corners(my_number_of_points,all_max_elem+all_max_edge)) - ALLOCATE(my_y_corners(my_number_of_points,all_max_elem+all_max_edge)) - ALLOCATE(coord_e_edge_center(2,my_number_of_points, all_max_edge)) - - ! We need to know for every node if any of it's edges are coastal, because - ! in case they are the center point will be a corner of the nodal area - coastal_nodes=.False. - allocate (coastal_edge_list(my_number_of_points*2,my_number_of_points*2)) - do edge=1, myDim_edge2D - ! local indice of nodes that span up edge - enodes=edges(:,edge) - ! 
local index of element that contribute to edge - el=edge_tri(:,edge) - if(el(2)>0) then - ! Inner edge - continue - else - ! Boundary/coastal edge - coastal_nodes(enodes(1))=.True. - coastal_nodes(enodes(2))=.True. - coastal_edge_list(enodes(1),enodes(2))=edge - coastal_edge_list(enodes(2),enodes(1))=edge - end if - end do - - - ! For every node, loop over neighbours, calculate edge center as mean of node center and neighbour node center. - coord_e_edge_center=0 - do i = 1, my_number_of_points - ! if we are on coastal node, include node center n=1 as corner - if (coastal_nodes(i)==.True.) then - do n = 1, nn_num(i) - call edge_center(i, nn_pos(n,i), this_x_coord, this_y_coord, mesh) - call r2g(coord_e_edge_center(1,i,n), coord_e_edge_center(2,i,n), this_x_coord, this_y_coord) - end do - ! else we skip n=1 and use only the edge centers n=2:nn_num(i) - else - do n = 2, nn_num(i) - call edge_center(i, nn_pos(n,i), this_x_coord, this_y_coord, mesh) - call r2g(coord_e_edge_center(1,i,n-1), coord_e_edge_center(2,i,n-1), this_x_coord, this_y_coord) - end do - end if - end do - + ALLOCATE(my_x_coords(my_number_of_points)) ALLOCATE(my_y_coords(my_number_of_points)) + ALLOCATE(my_x_corners(myDim_nod2D, 25)) + ALLOCATE(my_y_corners(myDim_nod2D, 25)) - ! Obtain center coordinates as node center on open ocean and as mean of corners at coastline do i = 1, my_number_of_points - ! Center coord as mean of corner coordiantes along coastline - if (coastal_nodes(i)==.True.) then - ! So we define temp_corner coordiantes - allocate(temp_x_coord(nod_in_elem2D_num(i)+nn_num(i))) - allocate(temp_y_coord(nod_in_elem2D_num(i)+nn_num(i))) - temp_x_coord=0 - temp_y_coord=0 - do j = 1, nod_in_elem2D_num(i) - temp_x_coord(j) = x_corners(i,j)*rad - temp_y_coord(j) = y_corners(i,j)*rad - end do - ! Loop over edges - do j = 1, nn_num(i) - ! We skip coastal edge center points for the new center point calculation - ! such that 1 element islands have the node center at the right angle - ! We only do so if n elements is > 2, to avoid having only 3 corners - if ((j>1) .and. (nod_in_elem2D_num(i) > 2)) then - edge = coastal_edge_list(i,nn_pos(j,i)) - ! if edge is coastal, we leave it out of the mean equation, replaced by the node center - if (edge>0) then - this_x_coord = coord_nod2D(1, i) - this_y_coord = coord_nod2D(2, i) - ! unrotate grid - call r2g(my_x_coords(i), my_y_coords(i), this_x_coord, this_y_coord) - temp_x_coord(j+nod_in_elem2D_num(i))=my_x_coords(i) - temp_y_coord(j+nod_in_elem2D_num(i))=my_y_coords(i) - ! case for only two elements, we need the real edge centers to ensure center coord - ! is inside polygon - else - temp_x_coord(j+nod_in_elem2D_num(i)) = coord_e_edge_center(1,i,j) - temp_y_coord(j+nod_in_elem2D_num(i)) = coord_e_edge_center(2,i,j) - end if - ! Open ocean case, we just use the corner coords - else - temp_x_coord(j+nod_in_elem2D_num(i)) = coord_e_edge_center(1,i,j) - temp_y_coord(j+nod_in_elem2D_num(i)) = coord_e_edge_center(2,i,j) - end if - end do - min_x = minval(temp_x_coord) - max_x = maxval(temp_x_coord) - ! if we are at dateline (fesom cell larger than pi) - if (max_x-min_x > pi) then - - ! set up separate data structures for the two hemispheres - n_pos=count(temp_x_coord>=0) - n_neg=count(temp_x_coord<0) - allocate(pos_x(n_pos)) - allocate(pos_y(n_pos)) - allocate(neg_x(n_neg)) - allocate(neg_y(n_neg)) - pos_x = 0 - pos_y = 0 - neg_x = 0 - neg_x = 0 - n=1 - do j = 1, size(temp_x_coord) - ! 
build separate corner vectors for the hemispheres - if (temp_x_coord(j) >= 0) then - pos_x(n) = temp_x_coord(j) - pos_y(n) = temp_y_coord(j) - n=n+1 - end if - end do - n=1 - do j = 1, size(temp_x_coord) - if (temp_x_coord(j) < 0) then - neg_x(n) = temp_x_coord(j) - neg_y(n) = temp_y_coord(j) - n=n+1 - end if - end do - ! if sum on right side of dateline are further from the dateline we shift the negative sum over to the right - if (-sum(pos_x)+pi*n_pos >= sum(neg_x)+pi*n_neg) then - this_x_coord = (sum(pos_x) + sum(neg_x) + 2*pi*n_neg) / (n_pos + n_neg) - this_y_coord = (sum(pos_y) + sum(neg_y)) / (n_pos + n_neg) - ! else we shift the positive sum over to the left side - else - this_x_coord = (sum(pos_x) - 2*pi*n_pos + sum(neg_x)) / (n_pos + n_neg) - this_y_coord = (sum(pos_y) + sum(neg_y)) / (n_pos + n_neg) - end if - deallocate(pos_x,pos_y,neg_x,neg_y) - ! max_x-min_x > pi -> we are not at dateline, just a normal mean is enough - else - this_x_coord = sum(temp_x_coord)/(size(temp_x_coord)) - this_y_coord = sum(temp_y_coord)/(size(temp_y_coord)) - end if - my_x_coords(i)=this_x_coord - my_y_coords(i)=this_y_coord - deallocate(temp_x_coord, temp_y_coord) - ! coastal_nodes(i)==.True. -> Node center on open ocean, we can use node center - else - this_x_coord = coord_nod2D(1, i) - this_y_coord = coord_nod2D(2, i) - ! unrotate grid - call r2g(my_x_coords(i), my_y_coords(i), this_x_coord, this_y_coord) - end if - end do - - ! Add the different corner types to single array in preparation for angle calculation - do i = 1, my_number_of_points - ! First for element center based corners - do j = 1, nod_in_elem2D_num(i) - my_x_corners(i,j) = x_corners(i,j)*rad ! atan2 takes radian and elem corners come in grad - my_y_corners(i,j) = y_corners(i,j)*rad - end do - ! Then we repeat for edge center coordinate - ! The the coast j=1 is the node center - if (coastal_nodes(i)==.True.) then - do j = 1, nn_num(i) - my_x_corners(i,j+nod_in_elem2D_num(i)) = coord_e_edge_center(1,i,j) - my_y_corners(i,j+nod_in_elem2D_num(i)) = coord_e_edge_center(2,i,j) - end do - ! On open ocean we dont use the node center as corner, and thus have one less corner - else - do j = 1, nn_num(i)-1 - my_x_corners(i,j+nod_in_elem2D_num(i)) = coord_e_edge_center(1,i,j) - my_y_corners(i,j+nod_in_elem2D_num(i)) = coord_e_edge_center(2,i,j) - end do - end if - end do - - ! calculate angle between corners and center - do i = 1, my_number_of_points - if (coastal_nodes(i)==.True.) then - n=0 - else - n=1 - end if - do j = 1, nod_in_elem2D_num(i)+nn_num(i)-n - ! If they have different sign we are near the dateline and need to bring the corner onto - ! the same hemisphere as the center (only for angle calc, the coord for oasis remains as before) - ! Default: same sign -> normal atan2 - if (my_x_coords(i) <=0 .and. my_x_corners(i,j) <=0 .or. my_x_coords(i) >0 .and. my_x_corners(i,j) >0) then - angle(i,j) = atan2(my_x_corners(i,j) - my_x_coords(i), my_y_corners(i,j) - my_y_coords(i)) - else - ! at dateline center is on the right side - if (my_x_coords(i) >=pi/2) then - angle(i,j) = atan2(my_x_corners(i,j) + 2*pi - my_x_coords(i), my_y_corners(i,j) - my_y_coords(i)) - ! at dateline center is on the left side - else if (my_x_coords(i) <=-pi/2) then - angle(i,j) = atan2(my_x_corners(i,j) - 2*pi - my_x_coords(i), my_y_corners(i,j) - my_y_coords(i)) - ! 
at prime meridan -> also default - else - angle(i,j) = atan2(my_x_corners(i,j) - my_x_coords(i), my_y_corners(i,j) - my_y_coords(i)) - end if - end if - end do + this_x_coord = coord_nod2D(1, i) + this_y_coord = coord_nod2D(2, i) + call r2g(my_x_coords(i), my_y_coords(i), this_x_coord, this_y_coord) end do - ! Oasis requires corners sorted counterclockwise, so we sort by angle - do i = 1, my_number_of_points - if (coastal_nodes(i)==.True.) then - n=0 - else - n=1 - end if - do l = 1, nod_in_elem2D_num(i)+nn_num(i)-1-n - do m = l+1, nod_in_elem2D_num(i)+nn_num(i)-n - if (angle(i,l) < angle(i,m)) then - ! Swap angle - temp = angle(i,m) - angle(i,m) = angle(i,l) - angle(i,l) = temp - ! Swap lon - temp = my_x_corners(i,m) - my_x_corners(i,m) = my_x_corners(i,l) - my_x_corners(i,l) = temp - ! Swap lat - temp = my_y_corners(i,m) - my_y_corners(i,m) = my_y_corners(i,l) - my_y_corners(i,l) = temp - end if - end do - end do - end do - - ! We can have a variable number of corner points. - ! Luckly oasis can deal with that by just repeating the last one. - ! Note, we are only allowed to repeat one coordinate and - ! the last one is not an element center, but an edge center - do i = 1, my_number_of_points - do j = 1, all_max_elem+all_max_edge - if (coastal_nodes(i)==.True.) then - if (j < nod_in_elem2D_num(i)+nn_num(i)) then - my_y_corners(i,j)=my_y_corners(i,j) - my_x_corners(i,j)=my_x_corners(i,j) - else - my_y_corners(i,j)=my_y_corners(i,nod_in_elem2D_num(i)+nn_num(i)) - my_x_corners(i,j)=my_x_corners(i,nod_in_elem2D_num(i)+nn_num(i)) - end if - else - if (j < nod_in_elem2D_num(i)+nn_num(i)-1) then - my_y_corners(i,j)=my_y_corners(i,j) - my_x_corners(i,j)=my_x_corners(i,j) - else - my_y_corners(i,j)=my_y_corners(i,nod_in_elem2D_num(i)+nn_num(i)-1) - my_x_corners(i,j)=my_x_corners(i,nod_in_elem2D_num(i)+nn_num(i)-1) - end if - end if - end do - end do - - ! Oasis takes grad angles my_x_coords=my_x_coords/rad my_y_coords=my_y_coords/rad - my_x_corners=my_x_corners/rad - my_y_corners=my_y_corners/rad - + if (mype .eq. 0) then + print *, 'FESOM before corner computation' + endif + call node_contours(my_x_corners, my_y_corners, partit, mesh) + if (mype .eq. 0) then + print *, 'FESOM after corner computation' + endif + if (mype .eq. localroot) then ALLOCATE(all_x_coords(number_of_all_points, 1)) ALLOCATE(all_y_coords(number_of_all_points, 1)) - ALLOCATE(all_x_corners(number_of_all_points, 1, all_max_elem+all_max_edge)) - ALLOCATE(all_y_corners(number_of_all_points, 1, all_max_elem+all_max_edge)) ALLOCATE(all_area(number_of_all_points, 1)) + ALLOCATE(all_x_corners(number_of_all_points, 1, 25)) + ALLOCATE(all_y_corners(number_of_all_points, 1, 25)) else ALLOCATE(all_x_coords(1, 1)) ALLOCATE(all_y_coords(1, 1)) - ALLOCATE(all_x_corners(1, 1, all_max_elem+all_max_edge)) - ALLOCATE(all_y_corners(1, 1, all_max_elem+all_max_edge)) ALLOCATE(all_area(1, 1)) + ALLOCATE(all_x_corners(1, 1, 1)) + ALLOCATE(all_y_corners(1, 1, 1)) endif + + displs_from_all_pes(1) = 0 + do i = 2, npes + displs_from_all_pes(i) = SUM(counts_from_all_pes(1:(i-1))) + enddo + if (mype .eq. 0) then print *, 'FESOM before 1st GatherV', displs_from_all_pes(npes), counts_from_all_pes(npes), number_of_all_points endif @@ -637,42 +570,33 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh) counts_from_all_pes, displs_from_all_pes, MPI_DOUBLE_PRECISION, localroot, MPI_COMM_FESOM, ierror) if (mype .eq. 
0) then
-     print *, 'FESOM before 3rd GatherV', displs_from_all_pes(npes), counts_from_all_pes(npes), number_of_all_points
+     print *, 'FESOM before 3rd GatherV'
   endif
+  CALL MPI_GATHERV(area(1,:), my_number_of_points, MPI_DOUBLE_PRECISION, all_area, &
+                   counts_from_all_pes, displs_from_all_pes, MPI_DOUBLE_PRECISION, localroot, MPI_COMM_FESOM, ierror)
 
-  do j = 1, all_max_elem+all_max_edge
-     CALL MPI_GATHERV(my_x_corners(:,j), my_number_of_points, MPI_DOUBLE_PRECISION, all_x_corners(:,:,j), &
+  do j = 1, 25
+     CALL MPI_GATHERV(my_x_corners(:,j), myDim_nod2D, MPI_DOUBLE_PRECISION, all_x_corners(:,:,j), &
                       counts_from_all_pes, displs_from_all_pes, MPI_DOUBLE_PRECISION, localroot, MPI_COMM_FESOM, ierror)
-     CALL MPI_GATHERV(my_y_corners(:,j), my_number_of_points, MPI_DOUBLE_PRECISION, all_y_corners(:,:,j), &
+     CALL MPI_GATHERV(my_y_corners(:,j), myDim_nod2D, MPI_DOUBLE_PRECISION, all_y_corners(:,:,j), &
                       counts_from_all_pes, displs_from_all_pes, MPI_DOUBLE_PRECISION, localroot, MPI_COMM_FESOM, ierror)
   end do
 
-  if (mype .eq. 0) then
-     print *, 'FESOM before 4th GatherV'
-  endif
-  CALL MPI_GATHERV(area(1,:), my_number_of_points, MPI_DOUBLE_PRECISION, all_area, &
-                   counts_from_all_pes, displs_from_all_pes, MPI_DOUBLE_PRECISION, localroot, MPI_COMM_FESOM, ierror)
-
-  if (mype .eq. 0) then
-     print *, 'FESOM after 4th GatherV'
-  endif
-
-  CALL MPI_Barrier(MPI_COMM_FESOM, ierror)
   if (mype .eq. 0) then
      print *, 'FESOM after Barrier'
   endif
 
   if (mype .eq. localroot) then
-     print *, 'FESOM before start_grids_writing'
+     print *, 'FESOM before grid writing to oasis grid files'
      CALL oasis_start_grids_writing(il_flag)
      IF (il_flag .NE. 0) THEN
-        print *, 'FESOM before write grid'
+        print *, 'FESOM before write grid centers'
         CALL oasis_write_grid (grid_name, number_of_all_points, 1, all_x_coords(:,:), all_y_coords(:,:))
         print *, 'FESOM before write corner'
-        CALL oasis_write_corner (grid_name, number_of_all_points, 1, all_max_elem+all_max_edge, all_x_corners(:,:,:), all_y_corners(:,:,:))
+        CALL oasis_write_corner (grid_name, number_of_all_points, 1, 25, all_x_corners(:,:,:), all_y_corners(:,:,:))
 
         ALLOCATE(unstr_mask(number_of_all_points, 1))
         unstr_mask=0
@@ -688,10 +612,10 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh)
      call oasis_terminate_grids_writing()
      print *, 'FESOM after terminate_grids_writing'
   endif !localroot
+
+  DEALLOCATE(all_x_coords, all_y_coords, my_x_coords, my_y_coords)
 
-  DEALLOCATE(all_x_corners, all_y_corners, my_x_corners, my_y_corners, angle)
-  DEALLOCATE(coastal_nodes, coord_e_edge_center)
   !------------------------------------------------------------------
   ! 3rd Declare the transient variables
   !------------------------------------------------------------------
@@ -808,9 +732,6 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh)
   call exchange_roots(source_root, target_root, 1, partit%MPI_COMM_FESOM, MPI_COMM_WORLD)
   if (commRank) print *, 'FESOM source/target roots: ', source_root, target_root
 #endif
-  if (mype .eq. 0) then
-     print *, 'After enddef'
-  endif
 
! WHICH OF THE FOLLOWING DO I STILL NEED ???
 
@@ -818,9 +739,6 @@ subroutine cpl_oasis3mct_define_unstr(partit, mesh)
   allocate(exfld(myDim_nod2D))
   cplsnd=0.
   o2a_call_count=0
-  if (mype .eq. 0) then
-     print *, 'Before last barrier'
-  endif
   CALL MPI_BARRIER(MPI_COMM_FESOM, ierror)
   if (mype .eq. 0) then
diff --git a/src/fesom_module.F90 b/src/fesom_module.F90
index 422aada79..fed4f0698 100755
--- a/src/fesom_module.F90
+++ b/src/fesom_module.F90
@@ -45,7 +45,7 @@ module fesom_main_storage_module
     integer        :: n, from_nstep, offset, row, i, provided
     integer        :: which_readr ! read which restart files (0=netcdf, 1=core dump, 2=dtype)
     integer        :: total_nsteps
-    integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM
+    integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM, MPI_COMM_WORLD
     real(kind=WP)  :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc
     real(kind=WP)  :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing
     real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing
@@ -89,6 +89,9 @@ module fesom_module
 
 subroutine fesom_init(fesom_total_nsteps)
   use fesom_main_storage_module
+#if defined(__MULTIO)
+  use iom
+#endif
  integer, intent(out) :: fesom_total_nsteps
  ! EO parameters
  logical mpi_is_initialized
@@ -108,7 +111,7 @@ subroutine fesom_init(fesom_total_nsteps)
   !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS
   call MPI_Initialized(mpi_is_initialized, f%i)
   if(.not. mpi_is_initialized) then
-     ! do not initialize MPI here if it has been initialized already, e.g. via IFS when fesom is called as library (__ifsinterface is defined)
+     ! TODO: do not initialize MPI here if it has been initialized already, e.g. via IFS when fesom is called as library (__ifsinterface is defined)
     call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, f%provided, f%i)
     f%fesom_did_mpi_init = .true.
   end if
@@ -125,6 +128,7 @@ subroutine fesom_init(fesom_total_nsteps)
   f%mype          =>f%partit%mype
   f%MPIerr        =>f%partit%MPIerr
   f%MPI_COMM_FESOM=>f%partit%MPI_COMM_FESOM
+  f%MPI_COMM_WORLD=>f%partit%MPI_COMM_WORLD
   f%npes          =>f%partit%npes
   if(f%mype==0) then
     write(*,*)
@@ -243,6 +247,10 @@ subroutine fesom_init(fesom_total_nsteps)
        write(*,*) '============================================'
     endif
 
+#if defined(__MULTIO)
+    call iom_send_fesom_domains(f%partit, f%mesh)
+#endif
+
 !    f%dump_dir='DUMP/'
 !    INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS)
 !    if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir))
@@ -399,6 +407,10 @@ subroutine fesom_runloop(current_nsteps)
 
 subroutine fesom_finalize()
   use fesom_main_storage_module
+#if defined(__MULTIO)
+  use iom
+  use mpp_io
+#endif
  ! EO parameters
  real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15)
@@ -441,6 +453,10 @@ subroutine fesom_finalize()
     ! OpenIFS coupled version has to call oasis_terminate through par_ex
     call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype)
 #endif
+
+#if defined(__MULTIO) && !defined(__ifsinterface) && !defined(__oasis)
+    call mpp_stop
+#endif
   if(f%fesom_did_mpi_init) call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) ! finalize MPI before FESOM prints its stats block, otherwise there is sometimes output from other processes from an earlier time in the program AFTER the stats block (with parastationMPI)
   if (f%mype==0) then
 41 format (a35,a10,2a15) !Format for table heading
diff --git a/src/fvom_init.F90 b/src/fvom_init.F90
index eac201990..30ee8f5f6 100755
--- a/src/fvom_init.F90
+++ b/src/fvom_init.F90
@@ -888,7 +888,7 @@ subroutine find_levels(mesh)
        !___________________________________________________________
        ! loop over neighbouring triangles
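+       ! (elems(i)==0 marks a missing neighbour; valid element indices start at 1)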
        do i=1,nneighb
-          if (elems(i)>1) then
+          if (elems(i)>0) then
              if (nlevels(elems(i))>=nz) then
                 !count neighbours
                 count_neighb_open=count_neighb_open+1
diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90
index edf441e73..03137840c 100644
--- a/src/gen_modules_partitioning.F90
+++ b/src/gen_modules_partitioning.F90
@@ -42,17 +42,33 @@ subroutine par_init(partit)    ! initializes MPI
   USE o_PARAM
   USE MOD_PARTIT
   USE MOD_PARSUP
+#ifdef __MULTIO
+  USE iom
+  USE mpp_io
+#endif
+
   implicit none
   type(t_partit), intent(inout), target :: partit
   integer                               :: i
   integer                               :: provided_mpi_thread_support_level
   character(:), allocatable             :: provided_mpi_thread_support_level_name
-
-#if defined __oasis || defined __ifsinterface
+#if defined __oasis || defined __ifsinterface
   ! use comm from coupler or ifs
-#else
-  partit%MPI_COMM_FESOM=MPI_COMM_WORLD  ! use global comm if not coupled (e.g. no __oasis or __ifsinterface)
-#endif
+  ! TODO: multio with __ifsinterface is magically handled by IFS by using the same
+  ! module and routine names as in src/ifs_interface; that is not elegant.
+#else
+#ifdef __MULTIO
+  CALL MPI_Comm_Size(MPI_COMM_WORLD, partit%npes, i)
+  CALL MPI_Comm_Rank(MPI_COMM_WORLD, partit%mype, i)
+  partit%MPI_COMM_FESOM=MPI_COMM_WORLD
+  partit%MPI_COMM_WORLD=MPI_COMM_WORLD
+  call mpp_io_init_2(partit%MPI_COMM_FESOM)
+#else
+  partit%MPI_COMM_FESOM=MPI_COMM_WORLD  ! use global comm if not coupled (e.g. no __oasis or __ifsinterface or IO server)
+#endif
+
+#endif
+
   call MPI_Comm_Size(partit%MPI_COMM_FESOM,partit%npes,i)
   call MPI_Comm_Rank(partit%MPI_COMM_FESOM,partit%mype,i)
@@ -93,8 +109,10 @@ subroutine par_ex(COMM, mype, abort)  ! finalizes MPI
 #else
 !For ECHAM coupled runs we use the old OASIS naming scheme (prism / prism_proto)
   use mod_prism
-#endif ! oifs/echam
-#endif ! oasis
+#endif
+  ! oifs/echam
+#endif
+  ! oasis
   implicit none
   integer, intent(in)           :: COMM
@@ -104,16 +122,23 @@ subroutine par_ex(COMM, mype, abort)  ! finalizes MPI
 ! For standalone runs we directly call the MPI_barrier and MPI_finalize
 !---------------------------------------------------------------
+!TODO: the logic here is convoluted; when __oasis is not defined and the model needs to abort, the abort does not actually happen via par_ex
 #ifndef __oasis
   if (present(abort)) then
     if (mype==0) write(*,*) 'Run finished unexpectedly!'
     call MPI_ABORT(COMM, 1 )
   else
+    ! TODO: this is where fesom standalone, ifsinterface etc. end up:
+    !1. there is actually no abort even when the model calls abort, and the barrier may hang
+    !2. when using fesom as a library, calling MPI_Finalize here is bad, as there may
+    !   be other MPI tasks still running in the calling library (e.g. IFS);
+    !   better practice in that case would be to free the communicator.
    call  MPI_Barrier(COMM, error)
    call  MPI_Finalize(error)
   endif
-#else ! standalone
+#else  ! standalone
+! TODO: the logic below is also convoluted; it is really not for standalone
 ! From here on the two coupled options
 !-------------------------------------
 #if defined (__oifs)
@@ -136,8 +161,10 @@ subroutine par_ex(COMM, mype, abort)  ! finalizes MPI
   if (mype==0) print *, 'FESOM calls MPI_Finalize'
   call  MPI_Finalize(error)
-#endif ! oifs/echam
-#endif ! oasis
+#endif
+  ! oifs/echam
+#endif
+  ! oasis
 
 ! Regardless of standalone, OpenIFS or ECHAM coupling, if we reach this point
 ! we should be fine shutting the whole model down
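! Note (illustrative sketch, not part of this patch): the TODOs above suggest that,
! in library mode, FESOM should free its communicator instead of finalizing MPI,
! since the calling program (e.g. IFS) may still have MPI ranks at work:
!
!    subroutine par_ex_library_mode(COMM, mype)
!       use mpi
!       implicit none
!       integer, intent(inout) :: COMM
!       integer, intent(in)    :: mype
!       integer                :: error
!       call MPI_Barrier(COMM, error)     ! wait until all FESOM ranks are done
!       call MPI_Comm_free(COMM, error)   ! release only FESOM's communicator; MPI stays up for the caller
!       if (mype==0) write(*,*) 'FESOM communicator freed'
!    end subroutine par_ex_library_mode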
diff --git a/src/gen_modules_read_NetCDF.F90 b/src/gen_modules_read_NetCDF.F90
index 8e459d1db..8118fa78a 100755
--- a/src/gen_modules_read_NetCDF.F90
+++ b/src/gen_modules_read_NetCDF.F90
@@ -205,7 +205,7 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, partit
   integer                   :: i, j, n, num
   integer                   :: itime, latlen, lonlen
   integer                   :: status, ncid, varid
-  integer                   :: lonid, latid
+  integer                   :: lonid, latid, drain_num
   integer                   :: istart(4), icount(4)
   real(real64)              :: x, y, miss
   real(real64), allocatable :: lon(:), lat(:)
@@ -363,7 +363,264 @@ subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, partit,
      call MPI_BCast(ncdata, nod2D, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror)
      model_2Darray=ncdata(myList_nod2D)
 end subroutine read_2ddata_on_grid_NetCDF
-
-end module g_read_other_NetCDF
+subroutine read_runoff_mapper(file, vari, R, partit, mesh)
+   ! 1. Read arrival points from the runoff mapper
+   ! 2. Create conservative remapping A*X=runoff:
+   !    A=remapping operator; X=runoff into drainage basins (in Sv); runoff=runoff in [m/s] to be put into the ocean
+   use g_config
+   use o_param
+   USE MOD_MESH
+   USE MOD_PARTIT
+   USE MOD_PARSUP
+   USE g_forcing_arrays, only: runoff
+   use g_support
+   implicit none
+
+#include "netcdf.inc"
+   character(*),  intent(in) :: file
+   character(*),  intent(in) :: vari
+   real(kind=WP), intent(in) :: R
+   type(t_mesh),   intent(in),    target :: mesh
+   type(t_partit), intent(inout), target :: partit
+   integer :: i, j, n, num, cnt, number_arrival_points, offset
+   real(kind=WP) :: dist, W
+   integer :: itime, latlen, lonlen
+   integer :: status, ncid, varid
+   integer :: lonid, latid, drain_num
+   integer :: istart(2), icount(2)
+   real(kind=WP), allocatable :: lon(:), lat(:)
+   integer,       allocatable :: ncdata(:,:)
+   real(kind=WP), allocatable :: lon_sparse(:), lat_sparse(:), dist_min(:), dist_min_glo(:)
+   real(kind=WP), allocatable :: arrival_area(:)
+   integer,       allocatable :: data_sparse(:), dist_ind(:)
+   integer :: ierror ! return error code
+   type(sparse_matrix) :: RUNOFF_MAPPER
+
+#include "associate_part_def.h"
+#include "associate_mesh_def.h"
+#include "associate_part_ass.h"
+#include "associate_mesh_ass.h"
+
+   if (mype==0) write(*,*) 'building RUNOFF MAPPER with radius of smoothing= ', R*1.e-3, ' km'
+   if (mype==0) then
+      ! open file
+      status=nf_open(trim(file), nf_nowrite, ncid)
+   end if
+
+   call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror)
+   if (status.ne.nf_noerr) then
+      print*,'ERROR: CANNOT READ 2D netCDF FILE CORRECTLY !!!!!'
+      print*,'Error in opening netcdf file '//file
+      call par_ex(partit%MPI_COMM_FESOM, partit%mype)
+      stop
+   endif
+
+   if (mype==0) then
+      ! lat
+      status=nf_inq_dimid(ncid, 'lat', latid)
+      status=nf_inq_dimlen(ncid, latid, latlen)
+      ! lon
+      status=nf_inq_dimid(ncid, 'lon', lonid)
+      status=nf_inq_dimlen(ncid, lonid, lonlen)
+   end if
+   call MPI_BCast(latlen, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror)
+   call MPI_BCast(lonlen, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror)
+
+   ! lat
+   if (mype==0) then
+      allocate(lat(latlen))
+      status=nf_inq_varid(ncid, 'lat', varid)
+      status=nf_get_vara_double(ncid,varid,1,latlen,lat)
+   end if
+
+   ! lon
+   if (mype==0) then
+      allocate(lon(lonlen))
+      status=nf_inq_varid(ncid, 'lon', varid)
+      status=nf_get_vara_double(ncid,varid,1,lonlen,lon)
+      ! make sure range 0. - 360.
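+      ! (longitudes west of Greenwich arrive as negative degrees and are wrapped
+      !  into [0,360) here; they are shifted by -360 and converted to radians below)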
+      do n=1,lonlen
+         if (lon(n)<0.0_WP) then
+            lon(n)=lon(n)+360._WP
+         end if
+      end do
+   end if
+
+   if (mype==0) then
+      allocate(ncdata(lonlen,latlen))
+      ncdata = 0
+      ! data
+      status=nf_inq_varid(ncid, trim(vari), varid)
+      istart = (/1,1/)
+      icount = (/lonlen,latlen/)
+      status=nf_get_vara_int(ncid,varid,istart,icount,ncdata)
+      ! close file
+      status=nf_close(ncid)
+      number_arrival_points=0
+      do i=1, lonlen
+         do j=1, latlen
+            if (ncdata(i,j)>0) then
+               number_arrival_points=number_arrival_points+1
+            end if
+         end do
+      end do
+   end if
+
+   call MPI_BCast(number_arrival_points, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror)
+   allocate(lon_sparse(number_arrival_points), lat_sparse(number_arrival_points))
+   allocate(dist_min(number_arrival_points), dist_min_glo(number_arrival_points), dist_ind(number_arrival_points))
+   allocate(data_sparse(number_arrival_points))
+
+   if (mype==0) then
+      cnt=1
+      do i=1, lonlen
+         do j=1, latlen
+            if (ncdata(i,j)>0) then
+               lon_sparse(cnt)=lon(i)
+               lat_sparse(cnt)=lat(j)
+               data_sparse(cnt)=ncdata(i,j)
+               cnt=cnt+1
+            end if
+         end do
+      end do
+      deallocate(ncdata, lon, lat)
+   end if
+   call MPI_BCast(lon_sparse, number_arrival_points, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror)
+   call MPI_BCast(lat_sparse, number_arrival_points, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror)
+   call MPI_BCast(data_sparse, number_arrival_points, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror)
+   drain_num=maxval(data_sparse)
+   ALLOCATE(arrival_area(drain_num))
+   arrival_area=0.0_WP ! will be used later to normalize the total flux
+   lon_sparse=lon_sparse-360.0_WP
+   lon_sparse=lon_sparse*rad
+   lat_sparse=lat_sparse*rad
+
+   ! for every arrival point, find the closest local node
+   do n=1, number_arrival_points
+      do i=1, myDim_nod2d
+         dist=distance_on_sphere(lon_sparse(n), lat_sparse(n), geo_coord_nod2D(1,i), geo_coord_nod2D(2,i))
+         if (i==1) then
+            dist_min(n)=dist
+            dist_ind(n)=1
+         end if
+         if (dist<dist_min(n)) then
+            dist_min(n)=dist
+            dist_ind(n)=i
+         end if
+      end do
+   end do
+   call MPI_AllREDUCE(dist_min, dist_min_glo, number_arrival_points, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr)
+
+   ! assemble the remapping operator row by row: one row per node, one column
+   ! entry per basin whose arrival points lie within the smoothing radius R
+   DO n=1, myDim_nod2D+eDim_nod2D
+      W=area(1,n)                    ! nodal area used as weight
+      offset=RUNOFF_MAPPER%rowptr(n)
+      cnt=0
+      DO i=1, number_arrival_points
+         j=data_sparse(i)
+         if ((j<1).or.(j>drain_num)) then
+            if (mype==0) then
+               write(*,*) 'RUNOFF MAPPER ERROR: arrival point has an index outside of permitted range', j, drain_num
+               write(*,*) 'two different grid points have same distance to a target point!'
+            end if
+            call par_ex(partit%MPI_COMM_FESOM, partit%mype)
+            STOP
+         end if
+         dist=distance_on_sphere(lon_sparse(i), lat_sparse(i), geo_coord_nod2D(1,n), geo_coord_nod2D(2,n))
+         if (dist < R) then
+            RUNOFF_MAPPER%values(offset+cnt)=(1.0-dist/R)
+            RUNOFF_MAPPER%colind(offset+cnt)=j
+            if (n<=myDim_nod2d) then
+               arrival_area(j)=arrival_area(j)+(1.0-dist/R)*W
+            end if
+            cnt=cnt+1
+         end if
+      END DO
+      if (cnt==0) then
+         RUNOFF_MAPPER%values(offset)=0.0_WP
+         RUNOFF_MAPPER%colind(offset)=1
+      end if
+   END DO
+
+   call MPI_AllREDUCE(MPI_IN_PLACE, arrival_area, drain_num, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr)
+   DO i=1, drain_num
+      where (RUNOFF_MAPPER%colind==i)
+            RUNOFF_MAPPER%values=RUNOFF_MAPPER%values/arrival_area(i)
+      end where
+   END DO
+
+   deallocate(lon_sparse, lat_sparse, dist_min, dist_min_glo)
+   deallocate(arrival_area)
+   deallocate(data_sparse, dist_ind)
+
+   do n=1, myDim_nod2D+eDim_nod2D
+      i=RUNOFF_MAPPER%rowptr(n)
+      j=RUNOFF_MAPPER%rowptr(n+1)-1
+      runoff(n)=sum(RUNOFF_MAPPER%values(i:j))
+   end do
+
+   call integrate_nod(runoff, W, partit, mesh)
+
+   if (mype==0) write(*,*) 'RUNOFF MAPPER check (total amount of basins):', drain_num
+   if (mype==0) write(*,*) 'RUNOFF MAPPER check (input of 1Sv from each basin results in runoff of):', W, ' Sv'
+   runoff=runoff*1.e-2
+end subroutine read_runoff_mapper
+
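+! Example call (hypothetical file name; R is in metres, like the distances
+! returned by distance_on_sphere below):
+!    call read_runoff_mapper('runoff_maps.nc', 'arrival_point_id', 500.e3_WP, partit, mesh)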
+real(kind=WP) function distance_on_sphere(lon1, lat1, lon2, lat2)
+!   use, intrinsic :: ISO_FORTRAN_ENV
+   use o_param
+   use g_config
+   implicit none
+   !lons & lats are in radians
+   real(kind=WP), intent(in) :: lon1, lat1, lon2, lat2
+   real(kind=WP)             :: r, delta_lon, delta_lat
+
+   delta_lon=abs(lon1-lon2)
+   if (delta_lon > cyclic_length/2.0_WP) delta_lon=delta_lon-cyclic_length
+   delta_lat=(lat1-lat2)
+   ! haversine formula: r is the squared half-chord length; r_earth is in metres,
+   ! so the result is the great-circle distance in metres
+   r = sin(delta_lat/2.0)**2 + cos(lat1) * cos(lat2) * sin(delta_lon/2.0)**2
+   distance_on_sphere=2.0 * atan2(sqrt(r), sqrt(1.0 - r))*r_earth
+end function distance_on_sphere
+end module g_read_other_NetCDF
\ No newline at end of file
diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90
index 967f217f8..529ce37d1 100644
--- a/src/gen_surface_forcing.F90
+++ b/src/gen_surface_forcing.F90
@@ -30,7 +30,7 @@ MODULE g_sbf
   !! we assume that all NetCDF files have identical grid and time variable
   !!
   !! public:
-  !!   sbc_ini  -- inizialization atmpospheric forcing
+  !!   sbc_ini  -- initialization of atmospheric forcing
   !!   sbc_do   -- provide a sbc (surface boundary conditions) each time step
   !!
   USE MOD_MESH
@@ -44,7 +44,7 @@ MODULE g_sbf
   USE g_config, only: dummy, ClimateDataPath, dt
   USE g_clock,  only: timeold, timenew, dayold, daynew, yearold, yearnew, cyearnew
   USE g_forcing_arrays, only: runoff, chl
-  USE g_read_other_NetCDF, only: read_other_NetCDF, read_2ddata_on_grid_netcdf
+  USE g_read_other_NetCDF, only: read_other_NetCDF, read_2ddata_on_grid_netcdf, read_runoff_mapper
   IMPLICIT NONE
 
   include 'netcdf.inc'
@@ -904,7 +904,7 @@ SUBROUTINE sbc_ini(partit, mesh)
   !!---------------------------------------------------------------------
   !! *** ROUTINE sbc_ini ***
   !!
-  !! ** Purpose : inizialization of ocean forcing
+  !! ** Purpose : initialization of ocean forcing
   !! ** Method  :
   !! ** Action  :
   !!----------------------------------------------------------------------
@@ -945,13 +945,13 @@ SUBROUTINE sbc_ini(partit, mesh)
   READ( nm_sbc_unit, nml=nam_sbc, iostat=iost )
   close( nm_sbc_unit )
 
-  if (mype==0) write(*,*) "Start: Ocean forcing inizialization."
+  if (mype==0) write(*,*) "Start: Ocean forcing initialization."
   rdate = real(julday(yearnew,1,1))
   rdate = rdate+real(daynew-1,WP)+timenew/86400._WP
   idate = int(rdate)
   if (mype==0) then
-     write(*,*) "Start: Ocean forcing inizialization."
+     write(*,*) "Start: Ocean forcing initialization."
      write(*,*) "Surface boundary conditions parameters:"
   end if
@@ -1098,8 +1098,9 @@ SUBROUTINE sbc_ini(partit, mesh)
         end if
      end if
 
-  if (mype==0) write(*,*) "DONE:  Ocean forcing inizialization."
+  if (mype==0) write(*,*) "DONE:  Ocean forcing initialization."
   if (mype==0) write(*,*) 'Parts of forcing data (only constant in time fields) are read'
+!  call read_runoff_mapper("/p/project/chhb19/streffing1/input/runoff-mapper/runoff_maps_D3.nc", "arrival_point_id", 1.0_WP, partit, mesh)
 END SUBROUTINE sbc_ini
 
 SUBROUTINE sbc_do(partit, mesh)
diff --git a/src/ice_fct.F90 b/src/ice_fct.F90
index a2ee681ed..0f1f064ee 100755
--- a/src/ice_fct.F90
+++ b/src/ice_fct.F90
@@ -136,7 +136,7 @@ subroutine ice_TG_rhs(ice, partit, mesh)
         rhs_ms(row)=0._WP
 #if defined (__oifs) || defined (__ifsinterface)
         rhs_temp(row)=0._WP
-#endif /* (__oifs) */
+#endif
     END DO
 !$OMP END DO
     ! 
Velocities at nodes @@ -174,7 +174,7 @@ subroutine ice_TG_rhs(ice, partit, mesh) rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes)) #if defined (__oifs) || defined (__ifsinterface) rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes)) -#endif /* (__oifs) */ +#endif END DO end do !$OMP END DO @@ -210,7 +210,7 @@ subroutine ice_fct_solve(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) call ice_fem_fct(4, ice, partit, mesh) ! ice_temp -#endif /* (__oifs) */ +#endif end subroutine ice_fct_solve ! @@ -297,7 +297,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) m_templ(row)=(rhs_temp(row)+gamma*sum(mass_matrix(clo:clo2)* & ice_temp(location(1:cn))))/area(1,row) + & (1.0_WP-gamma)*ice_temp(row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP !$OMP END PARALLEL DO @@ -305,7 +305,7 @@ subroutine ice_solve_low_order(ice, partit, mesh) call exchange_nod(m_icel,a_icel,m_snowl, partit, luse_g2g = .true.) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(m_templ, partit, luse_g2g = .true.) -#endif /* (__oifs) */ +#endif !$OMP BARRIER end subroutine ice_solve_low_order @@ -372,7 +372,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_snow(row)=rhs_ms(row)/area(1,row) #if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=rhs_temp(row)/area(1,row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP @@ -380,7 +380,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) call exchange_nod(dm_ice, da_ice, dm_snow, partit, luse_g2g = .true.) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit, luse_g2g = .true.) -#endif /* (__oifs) */ +#endif !$OMP BARRIER !___________________________________________________________________________ !iterate @@ -407,7 +407,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) rhs_new = rhs_temp(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) m_templ(row)= dm_temp(row)+rhs_new/area(1,row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP @@ -424,7 +424,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) dm_snow(row)=m_snowl(row) #if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=m_templ(row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP @@ -434,7 +434,7 @@ subroutine ice_solve_high_order(ice, partit, mesh) call exchange_nod(dm_ice, da_ice, dm_snow, partit, luse_g2g = .true.) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit, luse_g2g = .true.) -#endif /* (__oifs) */ +#endif !$OMP BARRIER end do end subroutine ice_solve_high_order @@ -504,7 +504,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) ! it takes memory and time. For every element ! we need its antidiffusive contribution to ! each of its 3 nodes - +!$OMP PARALLEL DO !$ACC DATA CREATE(icoef, elnodes) !$ACC PARALLEL LOOP GANG VECTOR DEFAULT(PRESENT) @@ -513,20 +513,18 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) tmin(n) = 0.0_WP end do !$ACC END PARALLEL LOOP - +!$OMP END PARALLEL DO ! Auxiliary elemental operator (mass matrix- lumped mass matrix) !$ACC KERNELS icoef = 1 !$ACC END KERNELS - !$ACC PARALLEL LOOP GANG VECTOR DEFAULT(PRESENT) do n=1,3 ! three upper nodes ! 
Cycle over rows row=elnodes(n) icoef(n,n)=-2 end do !$ACC END PARALLEL LOOP - !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, q, elem, elnodes, row, vol, flux, ae) !$OMP DO @@ -569,7 +567,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) dm_temp(elnodes)))*(vol/area(1,elnodes(q)))/12.0_WP end do end if -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP !$OMP END DO @@ -646,7 +644,7 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) !$ACC END PARALLEL LOOP !$OMP END DO end if -#endif /* (__oifs) */ +#endif !___________________________________________________________________________ ! Sums of positive/negative fluxes to node row @@ -946,12 +944,12 @@ subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) #endif !$OMP END DO end if -#endif /* (__oifs) */ || defined (__ifsinterface) +#endif !$OMP END PARALLEL call exchange_nod(m_ice, a_ice, m_snow, partit, luse_g2g = .true.) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(ice_temp, partit, luse_g2g = .true.) -#endif /* (__oifs) */ +#endif !$ACC END DATA @@ -1117,7 +1115,7 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) ! Computes the rhs in a Taylor-Galerkin way (with upwind type of ! correction for the advection operator) ! In this version I tr to split divergent term off, so that FCT works without it. - +!$OMP PARALLEL DO !$ACC PARALLEL LOOP GANG VECTOR DEFAULT(PRESENT) do row=1, myDim_nod2D !! row=myList_nod2D(m) @@ -1126,15 +1124,16 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) rhs_ms(row)=0.0_WP #if defined (__oifs) || defined (__ifsinterface) rhs_temp(row)=0.0_WP -#endif /* (__oifs) */ +#endif rhs_mdiv(row)=0.0_WP rhs_adiv(row)=0.0_WP rhs_msdiv(row)=0.0_WP #if defined (__oifs) || defined (__ifsinterface) rhs_tempdiv(row)=0.0_WP -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP +!$OMP END PARALLEL DO !$OMP PARALLEL DEFAULT(SHARED) PRIVATE(diff, entries, um, vm, vol, dx, dy, n, q, row, elem, elnodes, c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2) !$OMP DO @@ -1180,7 +1179,7 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) cx3=vol*ice%ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP #if defined (__oifs) || defined (__ifsinterface) cx4=vol*ice%ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP -#endif /* (__oifs) */ +#endif !___________________________________________________________________ #if defined(_OPENMP) && !defined(__openmp_reproducible) @@ -1212,7 +1211,7 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) !$ACC ATOMIC UPDATE #endif rhs_temp(row)=rhs_temp(row)+tmp_sum+cx4 -#endif /* (__oifs) */ +#endif !___________________________________________________________________ #if !defined(DISABLE_OPENACC_ATOMICS) @@ -1232,7 +1231,7 @@ subroutine ice_TG_rhs_div(ice, partit, mesh) !$ACC ATOMIC UPDATE #endif rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 -#endif /* (__oifs) */ +#endif #if defined(_OPENMP) && !defined(__openmp_reproducible) call omp_unset_lock(partit%plock(row)) #else @@ -1316,7 +1315,7 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_snow(row)=rhs_msdiv(row)/area(1,row) #if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=rhs_tempdiv(row)/area(1,row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP @@ -1326,7 +1325,7 @@ subroutine ice_update_for_div(ice, partit, mesh) call exchange_nod(dm_snow, partit, luse_g2g = .true.) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit, luse_g2g = .true.) 
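+    ! halo exchange of the temperature increment as well, so that the iteration
+    ! below starts from consistent values across PE boundaries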
-#endif /* (__oifs) */ +#endif !$OMP BARRIER !___________________________________________________________________________ !iterate @@ -1356,7 +1355,7 @@ subroutine ice_update_for_div(ice, partit, mesh) #if defined (__oifs) || defined (__ifsinterface) rhs_new = rhs_tempdiv(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) m_templ(row)= dm_temp(row)+rhs_new/area(1,row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP @@ -1372,7 +1371,7 @@ subroutine ice_update_for_div(ice, partit, mesh) dm_snow(row) = m_snowl(row) #if defined (__oifs) || defined (__ifsinterface) dm_temp(row) = m_templ(row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP @@ -1383,7 +1382,7 @@ subroutine ice_update_for_div(ice, partit, mesh) call exchange_nod(dm_snow, partit, luse_g2g = .true.) #if defined (__oifs) || defined (__ifsinterface) call exchange_nod(dm_temp, partit, luse_g2g = .true.) -#endif /* (__oifs) */ +#endif !$OMP BARRIER end do @@ -1396,7 +1395,7 @@ subroutine ice_update_for_div(ice, partit, mesh) m_snow(row) = m_snow(row)+dm_snow(row) #if defined (__oifs) || defined (__ifsinterface) ice_temp(row)= ice_temp(row)+dm_temp(row) -#endif /* (__oifs) */ +#endif end do !$ACC END PARALLEL LOOP !$OMP END PARALLEL DO diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index debe58873..62fa9c0a0 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -130,6 +130,7 @@ subroutine stress_tensor_m(ice, partit, mesh) vale=1.0_WP/(ice%ellipse**2) det2=1.0_WP/(1.0_WP+ice%alpha_evp) det1=ice%alpha_evp*det2 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, dx, dy, msum, asum, eps1, eps2, pressure, delta, meancos, usum, vsum, r1, r2, r3, si1, si2) do elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -184,6 +185,7 @@ subroutine stress_tensor_m(ice, partit, mesh) rdg_shear_elem(elem) = 0.5_WP*(delta - abs(eps11(elem)+eps22(elem))) #endif end do +!$OMP END PARALLEL DO ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of ! Boullion et al Ocean Modelling 2013, but in an implicit mode: ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), @@ -232,15 +234,18 @@ subroutine ssh2rhs(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, elem, elnodes, n, dx, dy, vol, meancos, aa, bb, p_ice) +!$OMP DO ! use rhs_m and rhs_a for storing the contribution from elevation: do row=1, myDim_nod2d rhs_a(row)=0.0_WP rhs_m(row)=0.0_WP end do - +!$OMP END DO !_____________________________________________________________________________ ! use floating sea ice for zlevel and zstar if (use_floatice .and. .not. 
trim(which_ale)=='linfs') then +!$OMP DO do elem=1,myDim_elem2d elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -263,10 +268,24 @@ subroutine ssh2rhs(ice, partit, mesh) bb=g*val3*vol aa=bb*sum(dx*(elevation(elnodes)+p_ice)) bb=bb*sum(dy*(elevation(elnodes)+p_ice)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + do n=1,3 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(n))) +#else +!$OMP ORDERED +#endif + rhs_a(elnodes(n))=rhs_a(elnodes(n))-aa + rhs_m(elnodes(n))=rhs_m(elnodes(n))-bb +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(n))) +#else +!$OMP END ORDERED +#endif + end do end do +!$OMP END DO else +!$OMP DO do elem=1,myDim_elem2d elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -279,10 +298,24 @@ subroutine ssh2rhs(ice, partit, mesh) bb=g*val3*vol aa=bb*sum(dx*elevation(elnodes)) bb=bb*sum(dy*elevation(elnodes)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + do n=1,3 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(n))) +#else +!$OMP ORDERED +#endif + rhs_a(elnodes(n))=rhs_a(elnodes(n))-aa + rhs_m(elnodes(n))=rhs_m(elnodes(n))-bb +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(n))) +#else +!$OMP END ORDERED +#endif + end do end do +!$OMP END DO end if +!$OMP END PARALLEL end subroutine ssh2rhs ! ! @@ -330,12 +363,15 @@ subroutine stress2rhs_m(ice, partit, mesh) !___________________________________________________________________________ val3=1.0_WP/3.0_WP - +!$OMP PARALLEL DO do row=1, myDim_nod2d u_rhs_ice(row)=0.0_WP v_rhs_ice(row)=0.0_WP end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, k, row, dx, dy, vol, mf, aa, bb, mass, cluster_area, elevation_elem) +!$OMP DO do elem=1,myDim_elem2d elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ @@ -351,15 +387,26 @@ subroutine stress2rhs_m(ice, partit, mesh) do k=1,3 row=elnodes(k) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(row)) +#else +!$OMP ORDERED +#endif u_rhs_ice(row)=u_rhs_ice(row) - vol* & (sigma11(elem)*dx(k)+sigma12(elem)*dy(k)) & -vol*sigma12(elem)*val3*mf !metrics v_rhs_ice(row)=v_rhs_ice(row) - vol* & (sigma12(elem)*dx(k)+sigma22(elem)*dy(k)) & +vol*sigma11(elem)*val3*mf ! metrics +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED +#endif end do end do - +!$OMP END DO +!$OMP DO do row=1, myDim_nod2d !_______________________________________________________________________ ! if cavity node skip it @@ -370,6 +417,8 @@ subroutine stress2rhs_m(ice, partit, mesh) u_rhs_ice(row)=(u_rhs_ice(row)*mass + rhs_a(row))/area(1,row) v_rhs_ice(row)=(v_rhs_ice(row)*mass + rhs_m(row))/area(1,row) end do +!$OMP END DO +!$OMP END PARALLEL end subroutine stress2rhs_m ! ! @@ -485,16 +534,18 @@ subroutine EVPdynamics_m(ice, partit, mesh) !NR inlined, to have all initialization in one place. ! call ssh2rhs - ! use rhs_m and rhs_a for storing the contribution from elevation: +!$OMP PARALLEL DO do row=1, myDim_nod2d rhs_a(row)=0.0_WP rhs_m(row)=0.0_WP end do - +!$OMP END PARALLEL DO !_____________________________________________________________________________ ! 
use floating sea ice for zlevel and zstar +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(el, elnodes, vol, dx, dy, p_ice, n, bb, aa) if (use_floatice .and. .not. trim(which_ale)=='linfs') then +!$OMP DO do el=1,myDim_elem2d elnodes=elem2D_nodes(:,el) @@ -518,12 +569,26 @@ subroutine EVPdynamics_m(ice, partit, mesh) bb=g*val3*vol aa=bb*sum(dx*(elevation(elnodes)+p_ice)) bb=bb*sum(dy*(elevation(elnodes)+p_ice)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + do n=1, 3 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(n))) +#else +!$OMP ORDERED +#endif + rhs_a(elnodes(n))=rhs_a(elnodes(n))-aa + rhs_m(elnodes(n))=rhs_m(elnodes(n))-bb +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(n))) +#else +!$OMP END ORDERED +#endif + end do end do +!$OMP END DO !_____________________________________________________________________________ ! use levitating sea ice for linfs, zlevel and zstar else +!$OMP DO do el=1,myDim_elem2d elnodes=elem2D_nodes(:,el) !_______________________________________________________________________ @@ -536,13 +601,27 @@ subroutine EVPdynamics_m(ice, partit, mesh) bb=g*val3*vol aa=bb*sum(dx*elevation(elnodes)) bb=bb*sum(dy*elevation(elnodes)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + do n=1, 3 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(n))) +#else +!$OMP ORDERED +#endif + rhs_a(elnodes(n))=rhs_a(elnodes(n))-aa + rhs_m(elnodes(n))=rhs_m(elnodes(n))-bb +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(n))) +#else +!$OMP END ORDERED +#endif + end do end do +!$OMP END DO end if - +!$OMP END PARALLEL !___________________________________________________________________________ ! precompute thickness (the inverse is needed) and mass (scaled by area) +!$OMP PARALLEL DO do i=1,myDim_nod2D inv_thickness(i) = 0._WP mass(i) = 0._WP @@ -565,9 +644,10 @@ subroutine EVPdynamics_m(ice, partit, mesh) ice_nod(i) = .true. endif enddo - +!$OMP END PARALLEL DO !___________________________________________________________________________ ! precompute pressure factor +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum) do el=1,myDim_elem2D elnodes=elem2D_nodes(:,el) pressure_fac(el) = 0._WP @@ -584,12 +664,13 @@ subroutine EVPdynamics_m(ice, partit, mesh) pressure_fac(el) = det2*ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) endif end do - +!$OMP END PARALLEL DO +!$OMP PARALLEL DO do row=1, myDim_nod2d u_rhs_ice(row)=0.0_WP v_rhs_ice(row)=0.0_WP end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ ! Ice EVPdynamics Iteration main loop: #if defined (__icepack) @@ -603,6 +684,8 @@ subroutine EVPdynamics_m(ice, partit, mesh) ! New implementation following Boullion et al, Ocean Modelling 2013. ! SD, 30.07.2014 !_______________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(el, i, ed, row, elnodes, dx, dy, meancos, eps1, eps2, delta, pressure, umod, drag, rhsu, rhsv, det, n) +!$OMP DO do el=1,myDim_elem2D if (ulevels(el)>1) cycle @@ -655,28 +738,59 @@ subroutine EVPdynamics_m(ice, partit, mesh) ! 
SD, 30.07.2014 !----------------------------------------------------------------- if (elnodes(1) <= myDim_nod2D) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(1))) +#else +!$OMP ORDERED +#endif u_rhs_ice(elnodes(1)) = u_rhs_ice(elnodes(1)) - elem_area(el)* & (sigma11(el)*dx(1)+sigma12(el)*(dy(1) + meancos)) !metrics v_rhs_ice(elnodes(1)) = v_rhs_ice(elnodes(1)) - elem_area(el)* & - (sigma12(el)*dx(1)+sigma22(el)*dy(1) - sigma11(el)*meancos) ! metrics + (sigma12(el)*dx(1)+sigma22(el)*dy(1) - sigma11(el)*meancos) !metrics +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(1))) +#else +!$OMP END ORDERED +#endif end if if (elnodes(2) <= myDim_nod2D) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(2))) +#else +!$OMP ORDERED +#endif u_rhs_ice(elnodes(2)) = u_rhs_ice(elnodes(2)) - elem_area(el)* & (sigma11(el)*dx(2)+sigma12(el)*(dy(2) + meancos)) !metrics v_rhs_ice(elnodes(2)) = v_rhs_ice(elnodes(2)) - elem_area(el)* & - (sigma12(el)*dx(2)+sigma22(el)*dy(2) - sigma11(el)*meancos) ! metrics + (sigma12(el)*dx(2)+sigma22(el)*dy(2) - sigma11(el)*meancos) !metrics +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(2))) +#else +!$OMP END ORDERED +#endif end if if (elnodes(3) <= myDim_nod2D) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elnodes(3))) +#else +!$OMP ORDERED +#endif u_rhs_ice(elnodes(3)) = u_rhs_ice(elnodes(3)) - elem_area(el)* & (sigma11(el)*dx(3)+sigma12(el)*(dy(3) + meancos)) !metrics v_rhs_ice(elnodes(3)) = v_rhs_ice(elnodes(3)) - elem_area(el)* & - (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) ! metrics + (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) !metrics +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elnodes(3))) +#else +!$OMP END ORDERED +#endif end if end if end do ! --> do el=1,myDim_elem2D - +!$OMP END DO +!$OMP DO do i=1, myDim_nod2d !___________________________________________________________________ if (ulevels_nod2D(i)>1) cycle @@ -702,15 +816,24 @@ subroutine EVPdynamics_m(ice, partit, mesh) v_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsv -rdt*mesh%coriolis_node(i)*rhsu) end if end do ! --> do i=1, myDim_nod2d - +!$OMP END DO !_______________________________________________________________________ ! apply sea ice velocity boundary condition +!$OMP DO do ed=1,myDim_edge2D !___________________________________________________________________ ! apply coastal sea ice velocity boundary conditions - if(myList_edge2D(ed) > edge2D_in) then - u_ice_aux(edges(:,ed))=0.0_WP - v_ice_aux(edges(:,ed))=0.0_WP + if (myList_edge2D(ed) > edge2D_in) then + do n=1, 2 +#if defined(_OPENMP) + call omp_set_lock (partit%plock(edges(n, ed))) +#endif + u_ice_aux(edges(n,ed))=0.0_WP + v_ice_aux(edges(n,ed))=0.0_WP +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(edges(n,ed))) +#endif + end do end if !___________________________________________________________________ @@ -718,26 +841,43 @@ subroutine EVPdynamics_m(ice, partit, mesh) if (use_cavity) then if ( (ulevels(edge_tri(1,ed))>1) .or. & ( edge_tri(2,ed)>0 .and. 
ulevels(edge_tri(2,ed))>1) ) then - u_ice_aux(edges(1:2,ed))=0.0_WP - v_ice_aux(edges(1:2,ed))=0.0_WP + do n=1, 2 +#if defined(_OPENMP) + call omp_set_lock (partit%plock(edges(n, ed))) +#endif + u_ice_aux(edges(n,ed))=0.0_WP + v_ice_aux(edges(n,ed))=0.0_WP +#if defined(_OPENMP) + call omp_unset_lock(partit%plock(edges(n,ed))) +#endif + end do end if end if end do ! --> do ed=1,myDim_edge2D - +!$OMP END DO !_______________________________________________________________________ +!$OMP MASTER call exchange_nod_begin(u_ice_aux, v_ice_aux, partit) - +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP end do - +!$OMP END DO +!$OMP MASTER call exchange_nod_end(partit) - +!$OMP END MASTER +!$OMP BARRIER +!$OMP END PARALLEL end do ! --> do shortstep=1, steps - u_ice=u_ice_aux - v_ice=v_ice_aux - +!$OMP PARALLEL DO + do row=1, myDim_nod2d+eDim_nod2D + u_ice(row)=u_ice_aux(row) + v_ice(row)=v_ice_aux(row) + end do +!$OMP END PARALLEL DO end subroutine EVPdynamics_m ! ! diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 index 03aba0b48..847fa60ae 100644 --- a/src/ifs_interface/ifs_interface.F90 +++ b/src/ifs_interface/ifs_interface.F90 @@ -9,6 +9,46 @@ MODULE nemogcmcoup_steps INTEGER :: substeps !per IFS timestep END MODULE nemogcmcoup_steps +#if defined(__MULTIO) +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver, irequired, iprovided, lmpi1) + + ! Initialize the NEMO mppio server + USE mpp_io + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + INTEGER :: irequired, iprovided + LOGICAL :: lmpi1 + + CALL mpp_io_init(icomm, lnemoioserver, irequired, iprovided, lmpi1) +END SUBROUTINE nemogcmcoup_init_ioserver + + +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + ! Initialize the NEMO mppio server + USE mpp_io + + IMPLICIT NONE + INTEGER :: icomm + + CALL mpp_io_init_2( icomm ) + IF (lioserver) THEN + ! IO server finished, clean-up multio objects + CALL mpp_stop() + ENDIF +END SUBROUTINE nemogcmcoup_init_ioserver_2 + +SUBROUTINE nemogcmcoup_end_ioserver + ! Function is only called for the IO client. + USE mpp_io + + IMPLICIT NONE + + CALL mpp_stop() + END SUBROUTINE nemogcmcoup_end_ioserver +#endif + SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, & & lwaveonly, iatmunit, lwrite ) @@ -535,50 +575,6 @@ SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & nfield = nfield + 1 pgvcur(:) = zrecvnf(:,nfield) - ! Pack u(v) surface currents on elements - !zsendnfUV(:,1)=fesom%dynamics%UV(1,1,1:myDim_elem2D) - !zsendnfUV(:,2)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here - !nfielduv = 2 - ! - !do elem=1, myDim_elem2D - ! - ! ! compute element midpoints - ! elnodes=elem2D_nodes(:,elem) - ! rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS - ! rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS - ! - ! ! Rotate vectors to geographical coordinates (r2g) - ! CALL vector_r2g(zsendnfUV(elem,1), zsendnfUV(elem,2), rlon, rlat, 0) ! 0-flag for rot. coord - ! - !end do - -#ifdef FESOM_TODO - - ! We need to sort out the non-unique global index before we - ! can couple currents - - ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. 
- IF (lparintmultatm) THEN - CALL parinter_fld_mult( nfielduv, mype, npes, icomm, UVtogauss, & - & myDim_nod2D, zsendnfUV, & - & nopoints, zrecvnfUV ) - ELSE - DO jf = 1, nfielduv - CALL parinter_fld( mype, npes, icomm, UVtogauss, & - & myDim_nod2D, zsendnfUV(:,jf), & - & nopoints, zrecvnfUV(:,jf) ) - ENDDO - ENDIF - pgucur(:) = zrecvnfUV(:,1) - pgvcur(:) = zrecvnfUV(:,2) - -#else - - !pgucur(:) = 0.0 - !pgvcur(:) = 0.0 - -#endif - END SUBROUTINE nemogcmcoup_lim2_get @@ -822,24 +818,20 @@ SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & ! Sort out incoming arrays from the IFS and put them on the ocean grid ! TODO - shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? - !longwave(:)=0. ! Done. Only used in stand-alone mode. - prec_rain(:)=0. ! Done, updated below. - prec_snow(:)=0. ! Done, updated below. - evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? - sublimation=0. ! Done, updated below. + shortwave(:)=0. + !longwave(:)=0. + prec_rain(:)=0. + prec_snow(:)=0. + evap_no_ifrac=0. + sublimation=0. ! - ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) - oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? + ice_heat_flux=0. + oce_heat_flux=0. ! - !runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? - !evaporation(:)=0. - !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) - !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - stress_atmice_x=0. ! Done, taux_ice - stress_atmice_y=0. ! Done, tauy_ice - stress_atmoce_x=0. ! Done, taux_oce - stress_atmoce_y=0. ! Done, tauy_oce + stress_atmice_x=0. + stress_atmice_y=0. + stress_atmoce_x=0. + stress_atmoce_y=0. ! =================================================================== ! ! Pack all arrays @@ -1147,4 +1139,4 @@ SUBROUTINE nemogcmcoup_final endif CALL fesom_finalize -END SUBROUTINE nemogcmcoup_final +END SUBROUTINE nemogcmcoup_final \ No newline at end of file diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 index bc711a8c6..1cedbc82c 100644 --- a/src/ifs_interface/ifs_notused.F90 +++ b/src/ifs_interface/ifs_notused.F90 @@ -3,33 +3,6 @@ ! ! -Original code by Kristian Mogensen, ECMWF. -SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - LOGICAL :: lnemoioserver - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver - - -SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) - - ! Initialize the NEMO mppio server - - IMPLICIT NONE - INTEGER :: icomm - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_init_ioserver_2 - - SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) @@ -202,7 +175,7 @@ SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & ! Local variables if(fesom%mype==0) then - WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...' + WRITE(0,*)'In nemogcmcoup_update_add FESOM dummy routine. Proceeding...' !CALL abort endif @@ -331,17 +304,3 @@ SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & CALL abort END SUBROUTINE nemogcmcoup_wam_update_stress - -SUBROUTINE nemogcmcoup_end_ioserver - - ! 
Close io servers - - IMPLICIT NONE - INTEGER :: icomm - LOGICAL :: lnemoioserver - - WRITE(*,*)'No mpp_ioserver' - CALL abort - -END SUBROUTINE nemogcmcoup_end_ioserver - diff --git a/src/ifs_interface/iom.F90 b/src/ifs_interface/iom.F90 new file mode 100644 index 000000000..3b3d8a8fb --- /dev/null +++ b/src/ifs_interface/iom.F90 @@ -0,0 +1,466 @@ +!===================================================== +! Input/Output manager : Library to write output files +! +! -Original code for NEMOv40 by ECMWF. +! -Adapted to FESOM2 by Razvan Aguridan, ECMWF, 2023. +!----------------------------------------------------- + +MODULE iom +#if defined(__MULTIO) + USE multio_api + USE, INTRINSIC :: iso_fortran_env, only: real64 + + IMPLICIT NONE + PRIVATE + + TYPE(multio_handle) :: mio_handle + INTEGER(8), PRIVATE :: mio_parent_comm + + PUBLIC iom_initialize, iom_init_server, iom_finalize + PUBLIC iom_send_fesom_domains + PUBLIC iom_field_request, iom_send_fesom_data + + PRIVATE ctl_stop + !!---------------------------------------------------------------------- + !! NEMO/OCE 4.0 , NEMO Consortium (2018) + !! $Id: iom.F90 13297 2020-07-13 08:01:58Z andmirek $ + !! Software governed by the CeCILL license (see ./LICENSE) + !!---------------------------------------------------------------------- + + TYPE iom_field_request + CHARACTER(100) :: name = REPEAT(" ", 100) + CHARACTER(100) :: category = REPEAT(" ", 100) + CHARACTER(5) :: gridType = REPEAT(" ", 5) + REAL(real64), DIMENSION(:), POINTER :: values => NULL() + INTEGER :: globalSize = 0 + INTEGER :: level = 0 + INTEGER :: step = 0 + END TYPE + +CONTAINS + + SUBROUTINE multio_custom_error_handler(context, err) + USE mpi + + IMPLICIT NONE + INTEGER(8), INTENT(INOUT) :: context ! Use mpi communicator as context + INTEGER, INTENT(IN) :: err + INTEGER :: mpierr + + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop( 'MULTIO ERROR: ', multio_error_string(err)) + IF (context /= MPI_UNDEFINED) THEN + CALL mpi_abort(int(context), MPI_ERR_OTHER, mpierr) + context = MPI_UNDEFINED + ENDIF + ENDIF + END SUBROUTINE + + + SUBROUTINE iom_initialize(client_id, local_comm, return_comm, global_comm ) + USE mpi + + IMPLICIT NONE + CHARACTER(LEN=*), INTENT(IN) :: client_id + INTEGER,INTENT(IN), OPTIONAL :: local_comm + INTEGER,INTENT(OUT), OPTIONAL :: return_comm + INTEGER,INTENT(IN), OPTIONAL :: global_comm + TYPE(multio_configuration) :: conf_ctx + INTEGER :: err + CHARACTER(len=16) :: err_str + + mio_parent_comm = mpi_comm_world + + err = multio_initialise() + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('Initializing multio failed: ', multio_error_string(err)) + END IF + + IF (PRESENT(global_comm)) THEN + mio_parent_comm = global_comm + ENDIF + + ! 
Prepare context and check errors explicitly until everything is set up - then failure handler is used + BLOCK + CHARACTER(:), allocatable :: config_file + INTEGER :: config_file_length + + CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', length=config_file_length) + IF (config_file_length == 0) THEN + call ctl_stop('The fesom plan file is not correctly set!') + err = conf_ctx%new() + ELSE + ALLOCATE(character(len=config_file_length + 1) :: config_file) + + CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', config_file) + err = conf_ctx%new(config_file) + + DEALLOCATE(config_file) + ENDIF + END BLOCK + + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('Creating multio configuration context failed: ', multio_error_string(err)) + END IF + + err = conf_ctx%mpi_allow_world_default_comm(.FALSE._1) + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('conf_ctx%mpi_allow_world_default_comm(.FALSE._1) failed: ', multio_error_string(err)) + END IF +! TODO: mpi_client_id not in multio main fapi only in dummy api +! err = conf_ctx%mpi_client_id(client_id) +! IF (err /= MULTIO_SUCCESS) THEN +! CALL ctl_stop('conf_ctx%mpi_client_id(', TRIM(client_id),') failed: ', multio_error_string(err)) +! END IF + + err = conf_ctx%mpi_return_client_comm(return_comm) + IF (err /= MULTIO_SUCCESS) THEN + WRITE (err_str, "(I10)") return_comm + CALL ctl_stop('conf_ctx%mpi_return_client_comm(', err_str,') failed: ', multio_error_string(err)) + END IF + + err = conf_ctx%mpi_parent_comm(int(mio_parent_comm)) + IF (err /= MULTIO_SUCCESS) THEN + WRITE (err_str, "(I10)") mio_parent_comm + CALL ctl_stop('conf_ctx%mpi_parent_comm(', err_str,') failed: ', multio_error_string(err)) + END IF + + err = mio_handle%new(conf_ctx) + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('mio_handle%new(conf_ctx) failed: ', multio_error_string(err)) + END IF + + err = conf_ctx%delete() + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('conf_ctx%delete() failed: ', multio_error_string(err)) + END IF + ! TODO: not in multio main fapi only in dummy api + ! Setting a failure handler that reacts on interface problems or exceptions that are not handled within the interface + +#if defined __ifsinterface + err = multio_set_failure_handler(multio_custom_error_handler, mio_parent_comm) + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('setting multio failure handler failed: ', multio_error_string(err)) + END IF +#endif + + err = mio_handle%open_connections(); + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('mio_handle%open_connections failed: ', multio_error_string(err)) + END IF + END SUBROUTINE iom_initialize + + SUBROUTINE iom_finalize() + IMPLICIT NONE + INTEGER :: err + + err = mio_handle%close_connections(); + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('mio_handle%close_connections failed: ', multio_error_string(err)) + END IF + + err = mio_handle%delete(); + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('mio_handle%delete failed: ', multio_error_string(err)) + END IF + END SUBROUTINE iom_finalize + + SUBROUTINE iom_init_server(server_comm) + IMPLICIT NONE + INTEGER, INTENT(IN) :: server_comm + type(multio_configuration) :: conf_ctx + INTEGER :: err + CHARACTER(len=16) :: err_str + + mio_parent_comm = server_comm + + err = multio_initialise() + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('Initializing multio failed: ', multio_error_string(err)) + END IF + + ! 
Prepare context and check errors explicitly until everything is set up - then failure handler is used + + BLOCK + CHARACTER(:), allocatable :: config_file + INTEGER :: config_file_length + + CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', length=config_file_length) + IF (config_file_length == 0) THEN + err = conf_ctx%new() + ELSE + ALLOCATE(character(len=config_file_length + 1) :: config_file) + + CALL get_environment_variable('MULTIO_FESOM_CONFIG_FILE', config_file) + err = conf_ctx%new(config_file) + + DEALLOCATE(config_file) + ENDIF + END BLOCK + + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('Creating multio server configuration context failed: ', multio_error_string(err)) + END IF + + err = conf_ctx%mpi_allow_world_default_comm(.FALSE._1) + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('conf_ctx%mpi_allow_world_default_comm(.FALSE._1) failed: ', multio_error_string(err)) + END IF + + err = conf_ctx%mpi_parent_comm(int(mio_parent_comm)) + IF (err /= MULTIO_SUCCESS) THEN + WRITE (err_str, "(I10)") mio_parent_comm + CALL ctl_stop('conf_ctx%mpi_parent_comm(', err_str,') failed: ', multio_error_string(err)) + END IF + ! TODO: not in multio main fapi only in dummy api + ! Setting a failure handler that reacts on interface problems or exceptions that are not handled within the interface + ! Set handler before invoking blocking start server call + +#if defined __ifsinterface + err = multio_set_failure_handler(multio_custom_error_handler, mio_parent_comm) + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('setting multio failure handler failed: ', multio_error_string(err)) + END IF +#endif + + ! Blocking call + err = multio_start_server(conf_ctx) + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('multio_start_server(conf_ctx) failed: ', multio_error_string(err)) + END IF + + err = conf_ctx%delete() + IF (err /= MULTIO_SUCCESS) THEN + CALL ctl_stop('conf_ctx%delete() failed: ', multio_error_string(err)) + END IF + END SUBROUTINE iom_init_server + + SUBROUTINE iom_send_fesom_domains(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + + IMPLICIT NONE + + TYPE(multio_metadata) :: md + INTEGER :: cerr + INTEGER :: elem, elnodes(3), aux + TYPE(t_partit), INTENT(IN), TARGET :: partit + TYPE(t_mesh), intent(in), TARGET :: mesh + INTEGER, DIMENSION(:), POINTER :: temp + +#include "../associate_part_def.h" +#include "../associate_mesh_def.h" +#include "../associate_part_ass.h" +#include "../associate_mesh_ass.h" + + +#if defined __ifsinterface + cerr = md%new() +#else + cerr = md%new(mio_handle) +#endif + + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%new() failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("name", "ngrid") + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%set_string(name) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("category", "fesom-domain-nodemap") + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%set_string(category) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("representation", "unstructured") + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%set_string(representation) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_int("globalSize", mesh%nod2D) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%set_int(globalSize) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_bool("toAllServers", .TRUE._1) + IF (cerr 
/= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%set_bool(toAllServers) failed: ', multio_error_string(cerr)) + END IF + + temp => partit%myList_nod2D(1:partit%myDim_nod2D) + cerr = mio_handle%write_domain(md, temp - 1) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, mio_handle%write_domain() failed: ', multio_error_string(cerr)) + END IF + + cerr = md%delete() + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: ngrid, md%delete() failed: ', multio_error_string(cerr)) + END IF + + !declare grid at elements + +#if defined __ifsinterface + cerr = md%new() +#else + cerr = md%new(mio_handle) +#endif + + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%new() failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("name", "egrid") + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%set_string(name) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("category", "fesom-domain-elemmap") + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%set_string(category) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("representation", "unstructured") + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%set_string(representation) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_int("globalSize", mesh%elem2D) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%set_int(globalSize) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_bool("toAllServers", .TRUE._1) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%set_bool(toAllServers) failed: ', multio_error_string(cerr)) + END IF + + cerr = mio_handle%write_domain(md, partit%myList_elem2D(partit%myInd_elem2D_shrinked) - 1) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, mio_handle%write_domain() failed: ', multio_error_string(cerr)) + END IF + + cerr = md%delete() + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_domains: egrid, md%delete() failed: ', multio_error_string(cerr)) + END IF + END SUBROUTINE iom_send_fesom_domains + + SUBROUTINE iom_send_fesom_data(data) + USE g_clock + IMPLICIT NONE + + TYPE(iom_field_request), INTENT(INOUT) :: data + INTEGER :: cerr + TYPE(multio_metadata) :: md + + +#if defined __ifsinterface + cerr = md%new() +#else + cerr = md%new(mio_handle) +#endif + + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_data: md%new() failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("category", data%category) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_data: md%set_string(category) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_int("globalSize", data%globalSize) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_data: md%set_int(globalSize) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_int("level", data%level) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_data: md%set_int(level) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_bool("toAllServers", .FALSE._1) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_data: md%set_bool(toAllServers) failed: ', multio_error_string(cerr)) + END IF + + cerr = md%set_string("name", trim(data%name)) + IF (cerr /= MULTIO_SUCCESS) THEN + CALL ctl_stop('send_fesom_data: 
md%set_string(name) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_string("gridSubtype", "undefined")
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_string(gridSubtype) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_string("grid-type", "undefined")
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_string(grid-type) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_string("operation", "average")
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_string(operation) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_string("domain", data%gridType)
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_string(domain) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_int("step", data%step)
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_int(step) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_int("stepInHours", data%step*24)
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_int(stepInHours) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_int("timeSpanInHours", 24)
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_int(timeSpanInHours) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%set_int("currentDate", yearnew * 10000 + month * 100 + day_in_month)
+ cerr = md%set_int("currentTime", INT(INT(timenew / 3600) * 10000 + (INT(timenew / 60) - INT(timenew / 3600) * 60) * 100 + (timenew-INT(timenew / 60) * 60)))
+ cerr = md%set_int("startDate", 2020 * 10000 + 01 * 100 + 20)
+ cerr = md%set_int("startTime", 0)
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%set_int(date) failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = mio_handle%write_field(md, data%values)
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: mio_handle%write_field failed: ', multio_error_string(cerr))
+ END IF
+
+ cerr = md%delete()
+ IF (cerr /= MULTIO_SUCCESS) THEN
+ CALL ctl_stop('send_fesom_data: md%delete failed: ', multio_error_string(cerr))
+ END IF
+ END SUBROUTINE
+
+ SUBROUTINE ctl_stop(m1, m2, m3, m4)
+ USE mpi
+
+ IMPLICIT NONE
+ CHARACTER(len=*), INTENT(in), OPTIONAL :: m1, m2, m3, m4
+ INTEGER :: dummy
+
+ IF ( PRESENT(m1) ) WRITE(*,*) m1
+ IF ( PRESENT(m2) ) WRITE(*,*) m2
+ IF ( PRESENT(m3) ) WRITE(*,*) m3
+ IF ( PRESENT(m4) ) WRITE(*,*) m4
+
+ CALL mpi_abort(mpi_comm_world, 1, dummy)
+ END SUBROUTINE ctl_stop
+
+ !!======================================================================
+#endif
+END MODULE iom
diff --git a/src/ifs_interface/mpp_io.F90 b/src/ifs_interface/mpp_io.F90
new file mode 100644
index 000000000..c9d86e627
--- /dev/null
+++ b/src/ifs_interface/mpp_io.F90
@@ -0,0 +1,226 @@
+!=====================================================
+! Ocean output initialisation.
+!
+! -Original code for NEMOv40 by Kristian Mogensen, ECMWF.
+! -Adapted to FESOM2 by Razvan Aguridan, ECMWF, 2023.
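+!
+! Task counts for the MULTIO/XIOS I/O servers are read from the 'namio'
+! namelist in namio.in (see mpp_io_init below). A minimal sketch of that
+! file, assuming two MULTIO server tasks and no XIOS tasks (example values
+! only):
+!   &namio
+!     ntask_multio = 2
+!     ntask_xios   = 0
+!   /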
+!-----------------------------------------------------
+
+MODULE mpp_io
+#if defined(__MULTIO)
+ USE iom
+ IMPLICIT NONE
+ PRIVATE
+
+ PUBLIC &
+ & mpp_io_init, &
+ & mpp_io_init_2, &
+ & mpp_stop
+
+ INTEGER :: ntask_multio = 0
+ INTEGER :: ntask_xios = 0
+ LOGICAL, PUBLIC :: lioserver, lmultioserver, lmultiproc
+ INTEGER :: ntask_notio
+ INTEGER, SAVE :: mppallrank, mppallsize, mppiorank, mppiosize
+ INTEGER, SAVE :: mppmultiorank, mppmultiosize
+ INTEGER, SAVE :: mppcomprank, mppcompsize
+ INTEGER, SAVE :: pcommworld, pcommworldmultio
+
+ CONTAINS
+
+ SUBROUTINE mpp_io_init( iicomm, lio, irequired, iprovided, lmpi1 )
+
+ INCLUDE "mpif.h"
+ INTEGER, INTENT(INOUT) :: iicomm
+ LOGICAL, INTENT(INOUT) :: lio
+ INTEGER, INTENT(INOUT) :: irequired, iprovided
+ LOGICAL, INTENT(IN) :: lmpi1
+
+ INTEGER :: icode, ierr, icolor
+ LOGICAL :: mpi_called
+ CHARACTER(len=128) :: cdlogfile
+ INTEGER :: ji
+ NAMELIST/namio/ntask_multio,ntask_xios
+
+ CALL mpi_initialized( mpi_called, icode )
+ IF ( icode /= MPI_SUCCESS ) THEN
+ WRITE(*,*)' mpp_io_init: Error in routine mpi_initialized'
+ CALL mpi_abort( mpi_comm_world, icode, ierr )
+ ENDIF
+
+ IF( mpi_called ) THEN
+ WRITE(*,*)' mpp_io_init assumes that it is initialising MPI'
+ CALL mpi_abort( mpi_comm_world, 1, ierr )
+ ENDIF
+
+ IF (lmpi1) THEN
+ CALL mpi_init( icode )
+ ELSE
+#ifdef MPI1
+ WRITE(0,*)'mpp_io_init:'
+ WRITE(0,*)'MPI1 defined but lmpi1 is false'
+ CALL abort
+#else
+ CALL mpi_init_thread(irequired,iprovided,icode)
+#endif
+ ENDIF
+
+ IF ( icode /= MPI_SUCCESS ) THEN
+ WRITE(*,*)' mpp_io_init: Error in routine mpi_init'
+ CALL mpi_abort( mpi_comm_world, icode, ierr )
+ ENDIF
+
+ CALL mpi_comm_rank( mpi_comm_world, mppallrank, ierr )
+ CALL mpi_comm_size( mpi_comm_world, mppallsize, ierr )
+
+ OPEN(10,file='namio.in')
+ READ(10,namio)
+ WRITE(*,namio)
+ CLOSE(10)
+
+ IF ( ntask_xios + ntask_multio == 0 ) THEN
+ iicomm = mpi_comm_world
+ lio=.FALSE.
+ RETURN
+ ENDIF
+
+ ntask_notio = mppallsize - ntask_xios - ntask_multio
+ IF ((mppallrank+1)<=ntask_notio) THEN
+ icolor=1
+ lioserver=.FALSE.
+ lmultioserver=.FALSE.
+ ELSE
+ icolor=3
+ lioserver=.TRUE.
+ lmultioserver=.TRUE.
+ ENDIF
+ lio=lioserver
+
+ CALL mpi_comm_split( mpi_comm_world, icolor, 0, iicomm, icode )
+ IF ( icode /= MPI_SUCCESS ) THEN
+ WRITE(*,*)' mpp_io_init: Error in routine mpi_comm_split'
+ CALL mpi_abort( mpi_comm_world, icode, ierr )
+ ENDIF
+ IF (lioserver) THEN
+ CALL mpi_comm_rank( iicomm, mppiorank, ierr )
+ CALL mpi_comm_size( iicomm, mppiosize, ierr )
+ WRITE(cdlogfile,'(A,I4.4,A)')'nemo_io_server.',mppiorank,'.log'
+ ELSE
+ mppiorank=0
+ mppiosize=0
+ ENDIF
+ lio=lioserver
+
+ END SUBROUTINE mpp_io_init
+
+ SUBROUTINE mpp_io_init_2( iicomm )
+ INCLUDE "mpif.h"
+
+ INTEGER, INTENT(INOUT) :: iicomm
+
+ INTEGER :: icode, ierr, icolor, iicommx, iicommm, iicommo
+ INTEGER :: ji,inum
+ LOGICAL :: lcompp
+#if defined(__MULTIO) && !defined(__ifsinterface) && !defined(__oasis)
+ ! Construct multio server communicator in FESOM standalone
+ INTEGER :: commsize, myrank
+ character(len=255) :: oce_npes_str, mio_npes_str
+ integer :: oce_npes_int, mio_npes_int, oce_status, mio_status
+ ! fesom standalone with MULTIO
+ ! both below envs are not needed as one can be deduced from the other by subtracting
+ ! from global total pes
+ CALL get_environment_variable('OCE_NPES', oce_npes_str, status=oce_status)
+ CALL get_environment_variable('MIO_NPES', mio_npes_str, status=mio_status)
+ CALL MPI_Comm_Size(MPI_COMM_WORLD, commsize, ierr)
+ CALL MPI_Comm_Rank(MPI_COMM_WORLD, myrank, ierr)
+ if ((oce_status/=0) .or. (mio_status/=0)) then
+ if (oce_status/=0) then
+ if (myrank==0) write(*,*) '$OCE_NPES variable is not set!'
+ end if
+ if (mio_status/=0) then
+ if (myrank==0) write(*,*) '$MIO_NPES variable is not set!'
+ end if
+ !call par_ex(MPI_COMM_WORLD, myrank, abort=.true.) ! TODO: doesn't work, see par_ex
+ call mpi_abort(mpi_comm_world, 1, ierr)
+ stop
+ end if
+ read(oce_npes_str,*,iostat=oce_status) oce_npes_int
+ read(mio_npes_str,*,iostat=mio_status) mio_npes_int
+ if (myrank==0) write(*,*) 'Total number of processes: ', commsize
+ if (myrank==0) write(*,*) 'FESOM runs on ', oce_npes_int, ' processes'
+ if (myrank==0) write(*,*) 'MULTIO Server runs on ', mio_npes_int, ' processes'
+
+ ! note: in case of ifsinterface lmultioserver and lioserver are init in mpp_init
+ if (myrank > oce_npes_int-1) then
+ lmultioserver = .true.
+ lioserver = .true.
+ ENDIF
+#endif
+
+
+ IF (lmultioserver.OR..NOT.lioserver) THEN
+ icolor=12
+ ELSE
+ icolor=13
+ ENDIF
+
+ CALL mpi_comm_split( iicomm, icolor, 0, pcommworldmultio, icode )
+ IF ( icode /= MPI_SUCCESS ) THEN
+ WRITE(*,*)' mpp_io_init2: Error in routine mpi_comm_split'
+ CALL mpi_abort( mpi_comm_world, icode, ierr )
+ ENDIF
+
+ CALL mpi_comm_rank( pcommworldmultio, mppmultiorank, ierr )
+ CALL mpi_comm_size( pcommworldmultio, mppmultiosize, ierr )
+
+ ! Construct compute communicator
+
+ IF (.NOT.lioserver) THEN
+ icolor=14
+ lcompp=.TRUE.
+ ELSE
+ icolor=15
+ lcompp=.FALSE.
+ ENDIF
+
+ CALL mpi_comm_split( iicomm, icolor, 0, iicommo, icode )
+ IF ( icode /= MPI_SUCCESS ) THEN
+ WRITE(*,*)' mpp_io_init2: Error in routine mpi_comm_split'
+ CALL mpi_abort( mpi_comm_world, icode, ierr )
+ ENDIF
+
+ CALL mpi_comm_rank( iicommo, mppcomprank, ierr )
+ CALL mpi_comm_size( iicommo, mppcompsize, ierr )
+
+ IF (.NOT.lioserver) THEN
+ CALL iom_initialize( "fesom client", return_comm=iicommm, global_comm = MPI_COMM_WORLD ) ! nemo local communicator given by xios
+ !iicomm = iicommm
+ ELSE
+ ! For io-server tasks start and run the right server
+ CALL iom_init_server( server_comm = iicomm )
+ ENDIF
+
+ ! Return to the model with iicomm being compute only tasks
+ iicomm = iicommo
+#if defined(__MULTIO) && !defined(__ifsinterface) && !defined(__oasis)
+ IF (lioserver) THEN
+ ! TODO: unless we fix partit and segregate compute communicators from IO
+ ! communicators it is hard to elegantly close either FESOM or MULTIO;
+ ! currently force stopping multio after all connections from clients are closed
+ stop "Exiting MIO server"
+ END IF
+#endif
+ END SUBROUTINE mpp_io_init_2
+
+ SUBROUTINE mpp_stop
+ INTEGER :: ierr
+
+ IF (.NOT.lioserver) THEN
+ call iom_finalize()
+ ENDIF
+
+#ifdef __ifsinterface
+ CALL mpi_finalize( ierr )
+#endif
+ END SUBROUTINE mpp_stop
+#endif
+END MODULE mpp_io
diff --git a/src/io_meandata.F90 b/src/io_meandata.F90
index 2c70801c9..7c3de6ed9 100644
--- a/src/io_meandata.F90
+++ b/src/io_meandata.F90
@@ -20,6 +20,8 @@ module io_MEANDATA
 type(t_partit), pointer :: p_partit
 integer :: ndim
 integer :: glsize(2)
+ integer :: shrinked_size
+ integer, allocatable, dimension(:) :: shrinked_indx
 integer :: accuracy
 real(real64), allocatable, dimension(:,:) :: local_values_r8
 real(real32), allocatable, dimension(:,:) :: local_values_r4
@@ -655,9 +657,9 @@ subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh)
 !___________________________________________________________________________
 ! output Monin-Obukov (TB04) mixing length
- if (use_momix) then
- call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, partit, mesh)
- end if
+ !if (use_momix) then
+ ! call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, partit, mesh)
+ !end if
 !___________________________________________________________________________
 if (ldiag_curl_vel3) then
@@ -1135,6 +1137,9 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
 use MOD_ICE
 use mod_tracer
 use io_gather_module
+#if defined(__MULTIO)
+ use iom
+#endif
#if defined (__icepack)
 use icedrv_main, only: init_io_icepack
#endif
@@ -1150,11 +1155,10 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
 type(t_tracer), intent(in) , target :: tracers
 type(t_dyn) , intent(in) , target :: dynamics
 type(t_ice) , intent(inout), target :: ice
-
- character(:), allocatable :: filepath
- real(real64) :: rtime !timestamp of the record
+ character(:), allocatable :: filepath
+ real(real64) :: rtime !timestamp of the record
- ctime=timeold+(dayold-1.)*86400
+ ctime=timeold+(dayold-1.)*86400
 !___________________________________________________________________________
 if (lfirst) then
@@ -1172,14 +1176,18 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
 !___________________________________________________________________________
 !PS if (partit%flag_debug .and. partit%mype==0) print *, achar(27)//'[33m'//' -I/O-> call update_means'//achar(27)//'[0m'
 call update_means
-
 !___________________________________________________________________________
 ! loop over defined streams
 do n=1, io_NSTREAMS
 !_______________________________________________________________________
 ! make pointer for entry onto io_stream object
 entry=>io_stream(n)
-
+!#if defined(__MULTIO)
+! call mio_write_nod(mio, entry)
+! lfirst=.false.
+! return
+!#endif
+
 !_______________________________________________________________________
 !check whether output will be written based on event frequency
 do_output=.false.
@@ -1204,6 +1212,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh)
 ! if its time for output --> do_output==.true.
 if (do_output) then
 if (vec_autorotate) call io_r2g(n, partit, mesh) ! automatically detect if a vector field and rotate if makes sense!
+#if !defined(__MULTIO)
 if(entry%thread_running) call entry%thread%join()
 entry%thread_running = .false.
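+ ! Note: with __MULTIO defined the asynchronous netCDF writer thread above
+ ! is bypassed; fields are instead shipped layer-wise to the MULTIO server
+ ! via send_data_to_multio() further below in this file.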
@@ -1256,7 +1265,7 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh) entry%rec_count=max(entry%rec_count, 1) write(*,*) trim(entry%name)//': current mean I/O counter = ', entry%rec_count end if ! --> if(partit%mype == entry%root_rank) then - +#endif !___________________________________________________________________ ! write double precision output if (entry%accuracy == i_real8) then @@ -1281,17 +1290,22 @@ subroutine output(istep, ice, dynamics, tracers, partit, mesh) END DO ! --> DO J=1, size(entry%local_values_r4,dim=2) !$OMP END PARALLEL DO end if ! --> if (entry%accuracy == i_real8) then - !___________________________________________________________________ entry%addcounter = 0 ! clean_meanarrays entry%ctime_copy = ctime - + +#if defined(__MULTIO) +! if (n==1) then + entry%rec_count = istep + call send_data_to_multio(entry) +! end if +#else !___________________________________________________________________ ! this is where the magic happens --> here do_output_callback is ! triggered as a method of the io_stream object --> call write_mean(...) call entry%thread%run() entry%thread_running = .true. - +#endif endif ! --> if (do_output) then end do ! --> do n=1, io_NSTREAMS lfirst=.false. @@ -1569,8 +1583,16 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, !___________________________________________________________________________ if(entry%glsize(1)==mesh%nod2D .or. entry%glsize(2)==mesh%nod2D) then entry%is_elem_based = .false. + entry%shrinked_size=partit%myDim_nod2D else if(entry%glsize(1)==mesh%elem2D .or. entry%glsize(2)==mesh%elem2D) then entry%is_elem_based = .true. + entry%shrinked_size=partit%myDim_elem2D_shrinked + allocate(entry%shrinked_indx(entry%shrinked_size)) + entry%shrinked_indx=partit%myInd_elem2D_shrinked +! write(*,*) partit%mype, partit%myDim_elem2D, partit%myDim_elem2D_shrinked, partit%myDim_elem2D-partit%myDim_elem2D_shrinked +! entry_index=0 +! call MPI_AllREDUCE(partit%myDim_elem2D_shrinked, entry_index, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, err) +! write(*,*) 'total elem=', mesh%elem2D, entry_index else if(partit%mype == 0) print *,"can not determine if ",trim(name)," is node or elem based" stop @@ -1709,4 +1731,48 @@ subroutine io_r2g(n, partit, mesh) !$OMP END PARALLEL DO END IF end subroutine + +#if defined(__MULTIO) +SUBROUTINE send_data_to_multio(entry) + USE iom + USE multio_api + + IMPLICIT NONE + + TYPE(Meandata), TARGET, INTENT(INOUT) :: entry + TYPE(iom_field_request) :: request + REAL(real64), DIMENSION(SIZE(entry%shrinked_indx)), TARGET :: temp + INTEGER :: numLevels, globalSize, lev, i + + numLevels = entry%glsize(1) + globalSize = entry%glsize(2) + + request%name = trim(entry%name) + IF (.NOT. entry%is_elem_based) THEN + request%gridType = "ngrid" + ELSE + request%gridType = "egrid" + END IF + request%globalSize = globalSize + request%step = entry%rec_count + if (numLevels==1) then + request%category="ocean-2d" + else + request%category="ocean-3d" + end if + ! loop over vertical layers --> do gather 3d variables layerwise in 2d slices + DO lev=1, numLevels + request%level = lev + IF (.NOT. 
entry%is_elem_based) THEN + request%values => entry%local_values_r8_copy(lev, 1:entry%shrinked_size) + ELSE + DO i = 1, SIZE(entry%shrinked_indx) + temp(i) = entry%local_values_r8_copy(lev, entry%shrinked_indx(i)) + END DO + request%values => temp + END IF + CALL iom_send_fesom_data(request) + END DO +END SUBROUTINE +#endif end module diff --git a/src/node_contour_boundary.h b/src/node_contour_boundary.h new file mode 100644 index 000000000..938ce8d87 --- /dev/null +++ b/src/node_contour_boundary.h @@ -0,0 +1,28 @@ +flag=1 +nn=0 +do while (flag==1) + if (elem2D_nodes(1,elem)==n) then + edge_left=elem_edges(3,elem) + edge_right=elem_edges(2,elem) + elseif (elem2D_nodes(2,elem)==n) then + edge_left=elem_edges(1,elem) + edge_right=elem_edges(3,elem) + else + edge_left=elem_edges(2,elem) + edge_right=elem_edges(1,elem) + end if + nn=nn+1 + nedges(nn)=edge_left + nelems(nn)=elem + el=edge_tri(:,edge_right) + if (el(2)>0) then + if (el(1)==elem) then + elem=el(2) + else + elem=el(1) + end if + else !the last element + nedges(nn+1)=edge_right + flag=0 + end if +end do \ No newline at end of file diff --git a/src/node_contour_inner.h b/src/node_contour_inner.h new file mode 100644 index 000000000..1e4555f89 --- /dev/null +++ b/src/node_contour_inner.h @@ -0,0 +1,25 @@ +nn=0 +do while (nn 1) cycle ! --> if cavity node hbar == hbar_old hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/areasvol(ulevels_nod2D(n),n) end do !$OMP END PARALLEL DO @@ -2495,7 +2494,7 @@ subroutine vert_vel_ale(dynamics, partit, mesh) end do !$OMP END PARALLEL DO -!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, c1, c2) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 @@ -3165,7 +3164,7 @@ subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) ! rigid lid. !$OMP PARALLEL DO do node=1, myDim_nod2D+eDim_nod2D - if (ulevels_nod2D(node)==1) eta_n(node)=alpha*hbar(node)+(1.0_WP-alpha)*hbar_old(node) + eta_n(node)=alpha*hbar(node)+(1.0_WP-alpha)*hbar_old(node) end do !$OMP END PARALLEL DO ! --> eta_(n) diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index 34b9528ff..932110adf 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -274,7 +274,8 @@ subroutine solve_tracers_ale(ice, dynamics, tracers, partit, mesh) end do !$OMP END PARALLEL DO end if - + + ! TODO: do it only when it is coupled to atmosphere !___________________________________________________________________________ ! to avoid crash with high salinities when coupled to atmosphere ! 
--> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom
diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90
index 653785313..1843e345b 100755
--- a/src/oce_mesh.F90
+++ b/src/oce_mesh.F90
@@ -842,28 +842,28 @@ SUBROUTINE read_mesh(partit, mesh)
 n=com_elem2D_full%sptr(com_elem2D_full%sPEnum+1)-1
 ALLOCATE(com_elem2D_full%slist(n))
 read(fileID,*) com_elem2D_full%slist
-
-!!$ read(fileID,*) com_edge2D%rPEnum
-!!$ ALLOCATE(com_edge2D%rPE(com_edge2D%rPEnum))
-!!$ read(fileID,*) com_edge2D%rPE
-!!$ ALLOCATE(com_edge2D%rptr(com_edge2D%rPEnum+1))
-!!$ read(fileID,*) com_edge2D%rptr
-!!$ ALLOCATE(com_edge2D%rlist(eDim_edge2D))
-!!$ read(fileID,*) com_edge2D%rlist
-!!$
-!!$ read(fileID,*) com_edge2D%sPEnum
-!!$ ALLOCATE(com_edge2D%sPE(com_edge2D%sPEnum))
-!!$ read(fileID,*) com_edge2D%sPE
-!!$ ALLOCATE(com_edge2D%sptr(com_edge2D%sPEnum+1))
-!!$ read(fileID,*) com_edge2D%sptr
-!!$ n=com_edge2D%sptr(com_edge2D%sPEnum+1)-1
-!!$ ALLOCATE(com_edge2D%slist(n))
-!!$ read(fileID,*) com_edge2D%slist
 close(fileID)
+ if (mype==0) write(*,*) 'communication arrays are read'
 deallocate(rbuff, ibuff)
 deallocate(mapping)
-
+
+! necessary for MULTIO auxiliary data:
+! one element might belong to several processes, hence we unify the element partition
+! such that sum(myDim_elem2D_shrinked) over all processors will give elem2D
+ partit%myDim_elem2D_shrinked=0
+ DO n=1, myDim_elem2D
+ if (mesh%elem2D_nodes(1, n) > myDim_nod2D) cycle
+ partit%myDim_elem2D_shrinked=partit%myDim_elem2D_shrinked+1
+ END DO
+ allocate(partit%myInd_elem2D_shrinked(partit%myDim_elem2D_shrinked))
+! fill the respective indices
+ nn=1
+ DO n=1, myDim_elem2D
+ if (mesh%elem2D_nodes(1, n) > myDim_nod2D) cycle
+ partit%myInd_elem2D_shrinked(nn)=n
+ nn=nn+1
+ END DO
 ! no checksum for now, execute_command_line is failing too often. if you think it is important, please drop me a line and I will try to revive it: jan.hegewald@awi.de
 mesh%representative_checksum = ''
diff --git a/src/write_step_info.F90 b/src/write_step_info.F90
index e9b601b98..9b1087248 100644
--- a/src/write_step_info.F90
+++ b/src/write_step_info.F90
@@ -417,7 +417,7 @@ subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh)
 end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ...
- do nz=1,nlevels_nod2D(n)-1
+ do nz=ulevels_nod2D(n),nlevels_nod2D(n)-1
 !_______________________________________________________________
 ! check temp
 if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. &
@@ -469,7 +469,7 @@
 !_______________________________________________________________
 ! check salt
 if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. &
- tracers%data(2)%values(nz, n) <=3.0_WP .or. tracers%data(2)%values(nz, n)>=45.0_WP ) then
+ tracers%data(2)%values(nz, n) <3.0_WP .or. tracers%data(2)%values(nz, n) >45.0_WP ) then
 !$OMP CRITICAL
 found_blowup_loc=1
 write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep
diff --git a/work/job_ini_ollie b/work/job_ini_ollie
deleted file mode 100755
index 693296bad..000000000
--- a/work/job_ini_ollie
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=fesom2.0_INI
-#SBATCH -p mpp
-#SBATCH --ntasks=32
-#SBATCH --time=12:00:00
-#SBATCH -o slurm-out.out
-#SBATCH -e slurm-err.out
-
-set -x
-
-ulimit -s unlimited
-
-module purge
-module load intel.compiler
-module load intel.mpi
-module load netcdf/4.4.0_intel
-
-ln -s ../bin/fesom_ini.x . # cp -n ../bin/fvom_ini.x
-cp -n ../config/namelist.config .
-cp -n ../config/namelist.forcing . -cp -n ../config/namelist.oce . -cp -n ../config/namelist.ice . - - -# determine JOBID -JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` - -date -srun --mpi=pmi2 --ntasks=1 ./fesom_ini.x > "fvom_ini.out" -date - diff --git a/work/job_levante b/work/job_levante index 721dc361d..824e233a7 100755 --- a/work/job_levante +++ b/work/job_levante @@ -28,6 +28,10 @@ cp -n ../config/namelist.forcing . cp -n ../config/namelist.oce . cp -n ../config/namelist.ice . cp -n ../config/namelist.icepack . +cp -n ../config/namelist.tra . +cp -n ../config/namelist.io . +cp -n ../config/namelist.cvmix . +cp -n ../config/namelist.dyn . date srun -l fesom.x > "fesom2.0.out" diff --git a/work/job_ollie b/work/job_ollie deleted file mode 100755 index ac2516115..000000000 --- a/work/job_ollie +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=fesom2.0 -#SBATCH -p mpp -#SBATCH --ntasks=288 -#SBATCH --time=00:05:00 -#SBATCH -o slurm-out.out -#SBATCH -e slurm-err.out -module load intel.compiler intel.mpi netcdf/4.4.0_intel -module load centoslibs - -set -x - -ulimit -s unlimited - -# determine JOBID -JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` - -ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x -cp -n ../config/namelist.config . -cp -n ../config/namelist.forcing . -cp -n ../config/namelist.oce . -cp -n ../config/namelist.ice . -cp -n ../config/namelist.io . -cp -n ../config/namelist.icepack . - -date -srun --mpi=pmi2 ./fesom.x > "fesom2.0.out" -date - -#qstat -f $PBS_JOBID -#export EXITSTATUS=$? -#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then -#sbatch job_ollie -#fi diff --git a/work/job_ollie_chain b/work/job_ollie_chain deleted file mode 100755 index 8f98f1515..000000000 --- a/work/job_ollie_chain +++ /dev/null @@ -1,317 +0,0 @@ -#!/bin/bash -#___SET SLURM OPTIONS___________________________________________________________ -#SBATCH -J chain -#SBATCH -p mpp -#SBATCH --ntasks=432 -#SBATCH --time=10:00:00 -#SBATCH --mail-type=END -#SBATCH --mail-user=Patrick.Scholz@awi.de -#SBATCH -o fesom2.0_%x_%j.out -#SBATCH -e fesom2.0_%x_%j.out - -## module load intel.compiler intel.mpi netcdf centoslibs - -#___DEFAULT INPUT_______________________________________________________________ -# how many job chains should be applied -chain_n=3 # number chain cycles -chain_s=1 # starting chain id - -# time frame of model simulation -# ___COREv2___ -year_s=1948 -year_e=2009 -# ___JRA55____ -#year_s=1958 -#year_e=2018 - -prescribe_rlen=0 # run length in namelist.config --> if 0 value from namelist.config is taken -fedit=1 - -#___HELP OUTPUT_________________________________________________________________ -script_name=job_ollie_chain -function usage { - echo "usage: $script_name [-cn ...] [-cs ...] [-ys ...] [-ye ...] [-wcl ...]" - echo " -cn number of chain cylces (default: 3)" - echo " -cs starting chain id (default: 1)" - echo " -ys starting year of model simulation (default: 1948)" - echo " -ye ending year of model simulation (default: 2009)" - echo " -h display help" - echo " -rl prescribe used run_length" - echo " -noedit no automatic editing of namelist.config" - echo - echo " --> for changing the wall-clock-time interactively use " - echo " sbatch --time=00:10:00 job_ollie_chain ..." - echo " --> for changing the number of task interactively use " - echo " sbatch --ntask=288 job_ollie_chain ..." - echo " --> for changing the job name interactively use " - echo " sbatch --job-name=whatever job_ollie_chain ..." 
- exit 1 -} - -#___OVERRIDE DEFAULT INPUT BY COMMANDLINE INPUT_________________________________ -while [ "$1" != "" ]; do - case $1 in - -cn | -chain_n ) shift ; chain_n=$1 ;; - -cs | -chain_s ) shift ; chain_s=$1 ;; - -ys | -year_s ) shift ; year_s=$1 ;; - -ye | -year_e ) shift ; year_e=$1 ;; - -rl | --run_length ) shift ; prescribe_rlen=$1 ;; - -noedit | --noedit ) fedit=0 ;; - -h | --help ) usage ; exit ;; - esac - shift -done - -#___EXTRACT WALL-CLOCK-TIME FROM JOBINFO________________________________________ -# either setted via #SBATCH time=... or at command line sbatch --time=... job_ollie_chain -# need here to extract to give the next job chain cycle as input -jobinfo=$(scontrol show job $SLURM_JOB_ID) -wcl=$( echo ${jobinfo##*"TimeLimit="} | cut -d " " -f 1 ) -ntasks=$( echo ${jobinfo##*"NumTasks="} | cut -d " " -f 1 ) - - -#___SET NAMELIST'S & EXECUTABLE IF NOT ALREADY EXIST____________________________ -set -x -ulimit -s unlimited -ln -s ../bin/fesom.x . # cp -n ../bin/fvom_ini.x -cp -n ../config/namelist.config . -cp -n ../config/namelist.forcing . -cp -n ../config/namelist.oce . -cp -n ../config/namelist.ice . - -#___SET CHAIN_ID________________________________________________________________ -if [ -f "file_chain_id" ]; then - chain_id=$( file_chain_id -fi - -#___PRINT INPUT INFO____________________________________________________________ -echo -e "\033[1;7;33m_____JOB CHAIN INFO_____________________________________\033[0m" -echo -e "\033[1;33m --> actual chain cycle: $chain_id \033[0m" -echo -e "\033[1;33m --> max. number of chain cycles: $chain_n \033[0m" -echo -e "\033[1;33m --> simulated time range: [ $year_s $year_e] \033[0m" -echo -e "\033[1;33m --> slurm: wall-clock-time = $wcl \033[0m" -echo -e "\033[1;33m --> slurm: ntask = $ntasks \033[0m" -if [ $prescribe_rlen -ne 0 ]; then - echo -e "\033[1;33m -->change run_length = $prescribe_rlen \033[0m" -fi - -#___CREATE SAVE DIR INFRASTRUCTURE______________________________________________ -# extract resultpath from namelist.config -dname_result_link_orig=$(grep "ResultPath=" namelist.config | grep -v '^!' | \ - cut -d "=" -f 2 | \ - cut -d "'" -f 2) -dname_result_link=$(echo ${dname_result_link_orig::-1}) - -# identify higher directory --> ResultPath -dname_result="$(dirname "$dname_result_link")/" - -# if ResultPath directory doesn't exist --> create it -if [ ! -d "${dname_result}" ]; then - echo -e "\033[33m --> ResultPath directory does not exist --> will create it \033[0m" - mkdir "${dname_result}" -fi - -# check if in namelist.config a chain path is given (that mean -# ResultPath=.../ResultDir/chain/) if not change namelist.config accordingly -check=${dname_result_link##*${dname_result}} -if [ $fedit -eq 1 ] && [ $check != "chain" ]; then - echo -e "\033[33m --> replace in namelist.config ResultPath with chain path \033[0m" - dname_result_link="${dname_result}chain" - sed -i "s|${dname_result_link_orig}|${dname_result_link}/|g" namelist.config -fi - -# identify real path in case a link is used -dname_result="$(realpath "$dname_result")/" - -# if directory for chain_id doesn't exist --> create it -if [ ! 
-d "${dname_result}/${chain_id}" ]; then - echo -e "\033[33m --> chain_id directory does not exist --> will create it \033[0m" - mkdir "${dname_result}/${chain_id}" -fi - -# link directory of chain_id with original linkdirectory from namelist.config -ln -sfn ${dname_result}${chain_id} $dname_result_link - -#___CHECK IF SIMULATION NEEDS TO BE INITIALISED OR CONTINUED____________________ -is_newsimul=1 -if [ -f "$dname_result_link/fesom.clock" ] ; then - aux_yr_clock=$(<${dname_result_link}/fesom.clock) - aux_yr_clock=$(echo ${aux_yr_clock} | cut -d" " -f 6) - if [ $aux_yr_clock -le $year_e ]; then is_newsimul=0 ; fi - - #___________________________________________________________________________ - if [ $fedit -eq 1 ] ; then - if [ $is_newsimul -eq 0 ] ; then - year_d=$(( $year_e - $aux_yr_clock + 1 )) - rlen=$(grep "run_length=" namelist.config | cut -d "=" -f 2 | cut -d " " -f 1) - # do not prescribe run length in job fle - if [ $prescribe_rlen -eq 0 ] ; then - if [ $rlen -ne $year_d ] ; then - sed -i " s/run_length=$rlen/run_length=$year_d/" namelist.config - echo -e "\033[1;33m --> change run_length to: $year_d \033[0m" - fi - # prescribe run length in job fle - else - aux_year_d=$prescribe_rlen - if [ $year_d -lt $aux_year_d ] ; then aux_year_d=$year_d ; fi - if [ $rlen -ne $year_d ] ; then - sed -i " s/run_length=$rlen/run_length=$aux_year_d/" namelist.config - echo -e "\033[1;33m --> change run_length to: $aux_year_d \033[0m" - fi - fi - fi - fi -else - #___________________________________________________________________________ - # set model run length in job_script and change namelist.config accordingly - # to match - if [ $fedit -eq 1 ] ; then - year_d=$(( $year_e - $year_s + 1 )) - rlen=$(grep "run_length=" namelist.config | cut -d "=" -f 2 | cut -d " " -f 1) - # do not prescribe run length in job fle - if [ $prescribe_rlen -eq 0 ] ; then - if [ $rlen -ne $year_d ] ; then - sed -i " s/run_length=$rlen/run_length=$year_d/" namelist.config - echo -e "\033[1;33m --> change run_length to: $year_d \033[0m" - fi - # prescribe run length in job file - else - aux_year_d=$prescribe_rlen - if [ $year_d -lt $aux_year_d ] ; then aux_year_d=$year_d ; fi - if [ $rlen -ne $year_d ] ; then - sed -i " s/run_length=$rlen/run_length=$aux_year_d/" namelist.config - echo -e "\033[1;33m --> change run_length to: $aux_year_d \033[0m" - fi - fi - fi -fi - -#___CREATE CLOCK & RESTART INFRASTRUCTURE FOR COLD/WARM START___________________ -# only touch clock file when a new simulation is supposed to start, if an old one -# should be continued dont touch it -if [ $is_newsimul -eq 1 ] ; then - - # --> make cold start - if [ $chain_id -eq 1 ] ; then - #_______________________________________________________________________ - # create cold start clock file - echo -e "\033[33m --> create cold start clock file \033[0m" - printf "0 1 ${year_s}\n0 1 ${year_s}" > $dname_result_link/fesom.clock - - #_______________________________________________________________________ - # in case yearnew in namelist.config was changed from 1948 - yearnew=$(grep "yearnew=" namelist.config | cut -d "=" -f 2) - if [ $yearnew -ne $year_s ]; then - sed -i " s/yearnew=$yearnew/yearnew=$year_s/" namelist.config - fi - - #___BACKUP NAMELIST.* FILES INTO RESULT DIRECTORY_______________________ - cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix namelist.tra namelist.dyn ${dname_result}/. - cp fesom.x ${dname_result}/. 
- - #___BACKUP SRC FILES INTO RESULT DIRECTORY______________________________ - if [ ! -d "${dname_result}/src" ]; then mkdir "${dname_result}/src" ; fi - cp ../src/*.F90 ${dname_result}/src/. - - - # --> make warm start - else - #_______________________________________________________________________ - # create warm start clock file - stepperday=$(grep "step_per_day=" namelist.config | cut -d "=" -f 2 | cut -d " " -f 1 ) - aux_sec=$(( 86400 - 86400 / $stepperday )) - aux_day=365 - aux_yr=$(( $year_s - 1 )) - echo -e "\033[33m --> create warm start clock file \033[0m" - printf "${aux_sec} ${aux_day} ${aux_yr}\n0 1 ${year_s}" > $dname_result_link/fesom.clock - - #_______________________________________________________________________ - # chain id from previous spinup cycle - prev_chain_id=$(( $chain_id - 1 )) - - #_______________________________________________________________________ - # copy restart ocean files/directories from previous spinup cycle - prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.oce.restart - if [ -d "${prev_rfile}" ]; then - cp -r ${prev_rfile} ${dname_result_link}/fesom.${aux_yr}.oce.restart - elif [ -f "${prev_rfile}.nc" ]; then - cp ${prev_rfile}.nc ${dname_result_link}/fesom.${aux_yr}.oce.restart.nc - else - echo -e "\033[1;31m --> ERROR: could not find ocean restart file \033[0m" - exit - fi - - # copy restart ice files/files/directories from previous spinup cycle - prev_rfile=${dname_result}${prev_chain_id}/fesom.${year_e}.ice.restart - if [ -d "${prev_rfile}" ]; then - cp -r ${prev_rfile} ${dname_result_link}/fesom.${aux_yr}.ice.restart - elif [ -f "${prev_rfile}.nc" ]; then - cp ${prev_rfile}.nc ${dname_result_link}/fesom.${aux_yr}.ice.restart.nc - else - echo -e "\033[1;31m --> ERROR: could not find ice restart file \033[0m" - exit - fi - - #_______________________________________________________________________ - # adapt year new in namelist.config otherwise fesom is not doing a - # restart - aux=$(grep "yearnew=" namelist.config | cut -d "=" -f 2 | cut -d " " -f 1 ) - sed -i " s/yearnew=$aux/yearnew=$aux_yr/" namelist.config - - #_______________________________________________________________________ - # backup namelist.* & fesom.x in case they dont exist - if [ ! -f "${dname_result}/namelist.config" ]; then - cp namelist.config namelist.oce namelist.ice namelist.forcing namelist.io \ - namelist.cvmix namelist.tra namelist.dyn ${dname_result}/. - fi - if [ ! -f "${dname_result}/fesom.x" ]; then - cp fesom.x ${dname_result}/. - fi - #___BACKUP SRC FILES INTO RESULT DIRECTORY______________________________ - if [ ! -d "${dname_result}/src" ]; then mkdir "${dname_result}/src" ; fi - cp ../src/*.F90 ${dname_result}/src/. - fi -fi - -#___DETERMINE SLURM JOBID+OUTPUTFILE____________________________________________ -jobid=$(echo $SLURM_JOB_ID | cut -d"." -f1) -fname="fesom2.0_${SLURM_JOB_NAME}_${jobid}.out" - -#___PUT JOB IN QUEUE____________________________________________________________ -date -srun --mpi=pmi2 ./fesom.x >> ${fname} -err_srun=$? 
-echo -e "\033[33m --> err_srun=${err_srun} \033[0m" -date - -#___SHOW JOB INFORMATION________________________________________________________ -scontrol show job $SLURM_JOB_ID - -#___SETUP JOBCHAIN______________________________________________________________ -# check if complete cycle is finished only than increase chain_id -aux_yr_clock=$(<${dname_result_link}/fesom.clock) -aux_yr_clock=$(echo ${aux_yr_clock} | cut -d" " -f 6) - -# setup next chain job via dependence -if [ ${err_srun} -eq 0 ]; then - if [ $aux_yr_clock -lt $year_e ] || [ ${chain_id} -lt ${chain_n} ]; then - # aftercorr:job_id --> A task of this job array can begin execution after - # the corresponding task ID in the specified job has completed successfully - # (ran to completion with an exit code of zero). - echo -e "\033[33m --> setup next chain cycle \033[0m" - sbatch --time=$wcl --ntasks=$ntasks --job-name=${SLURM_JOB_NAME} --dependency=aftercorr:$SLURM_JOB_ID $script_name \ - -cn $chain_n -cs $chain_s -ys $year_s -ye $year_e -rl $prescribe_rlen - fi -fi - -#___CHECK FOR COMPLETNES________________________________________________________ -# check if complete cycle is finished only than increase chain_id -if [ $aux_yr_clock -gt $year_e ] && [ ${chain_id} -lt ${chain_n} ] ; then - chain_id=$(( $chain_id + 1 )) - echo $chain_id > file_chain_id -fi -